openstack

LXD: OpenStack CLI (OSC) container

# create container
lxc launch ubuntu:20.04 osc
lxc shell osc
 
# install OpenStack CLI and enable bash completion (inside the container)
apt install -y python3-openstackclient python3-neutron-vpnaas python3-octaviaclient
openstack complete | sudo tee /etc/bash_completion.d/openstack
source /etc/bash_completion
 
# configure connection (application credential auth);
# anchor paths to $HOME so this works from any working directory
mkdir -p ~/.config/openstack
cat <<EOF > ~/.config/openstack/clouds.yaml
clouds:
  dev-foo-app:
    auth:
      auth_url: https://keystone.service.example.com/v3
      application_credential_id: "xxxxxxxx"
      application_credential_secret: "xxxxxxxx"
    region_name: "eu-fra1"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
EOF
 
# select this cloud by default in future shells (single-quote: no expansion at write time)
echo 'export OS_CLOUD=dev-foo-app' >> ~/.bashrc
 
# test
export OS_CLOUD=dev-foo-app
openstack image list

Site-to-Site VPN connection between OpenStack VPNaaS and an AVM Fritz!Box

# Fritz!Box side parameters
FRITZBOX_WAN_IP=111.1.2.3
FRITZBOX_CIDR=192.168.178.0/24
PSK=PASS1234
 
# IKE (phase 1) policy — proposal must match the Fritz!Box configuration
openstack vpn ike policy create ikepolicy \
  --encryption-algorithm aes-256 \
  --auth-algorithm sha512 \
  --pfs group2
 
# IPsec (phase 2) policy
openstack vpn ipsec policy create ipsecpolicy \
  --encryption-algorithm aes-256 \
  --auth-algorithm sha512 \
  --pfs group2
 
# NOTE(review): assumes exactly one router/subnet in the project;
# with more than one, the list output contains multiple IDs — pick explicitly
ROUTER_ID=$(openstack router list -c ID -f value)
openstack vpn service create vpn \
  --router "${ROUTER_ID}"
 
SUBNET_ID=$(openstack subnet list -c ID -f value)
openstack vpn endpoint group create ep_subnet \
  --type subnet \
  --value "${SUBNET_ID}"
 
openstack vpn endpoint group create ep_cidr \
  --type cidr \
  --value "${FRITZBOX_CIDR}"
 
# site connection tying service, policies, endpoint groups and peer together
openstack vpn ipsec site connection create conn \
  --vpnservice vpn \
  --ikepolicy ikepolicy \
  --ipsecpolicy ipsecpolicy \
  --peer-address "${FRITZBOX_WAN_IP}" \
  --peer-id "${FRITZBOX_WAN_IP}" \
  --psk "${PSK}" \
  --local-endpoint-group ep_subnet \
  --peer-endpoint-group ep_cidr

Add an ingress SSH security-group rule

# allow inbound SSH from the Fritz!Box LAN
# (uses FRITZBOX_CIDR when set; falls back to the original hard-coded net)
openstack security group rule create default \
  --protocol tcp \
  --dst-port 22 \
  --remote-ip "${FRITZBOX_CIDR:-192.168.178.0/24}"

Create S2S VPN connection on Fritz!Box

Site-to-Site IPsec VPN between strongSwan and OpenStack VPNaaS

Setup

# Left (Ubuntu client, behind NAT)
Ubuntu Client IP: 212.8.9.10
Ubuntu net: 192.168.178.0/24
 
# Right (OpenStack VPNaaS): resolve the VPN service's external IPv4 address
# NOTE(review): assumes a single VPN service in the project
VPN_SERVICE_ID=$(openstack vpn service list -c ID -f value)
VPN_SERVICE_IP=$(openstack vpn service show "${VPN_SERVICE_ID}" -c external_v4_ip -f value)
echo "${VPN_SERVICE_IP}"
 
OpenStack VPN IP: 217.50.60.70
OpenStack Net: 10.0.1.0/24

Create OpenStack VPN endpoint
http://www.panticz.de/openstack/vpnaas

/etc/ipsec.secrets

# pre-shared key for the OpenStack VPN peer, keyed by its public IP
217.50.60.70 : PSK "PASS1234"

/etc/ipsec.conf

config setup
 
# strongSwan <-> OpenStack VPNaaS tunnel; the proposals below must match the
# ikepolicy/ipsecpolicy configured on the OpenStack side
conn vpn1
 keyexchange=ikev1
 # left = this Ubuntu host (behind NAT, so use the default-route address)
 left=%defaultroute
 leftid=212.8.9.10
 leftsubnet=192.168.178.0/24
 leftauth=psk
 # install firewall rules for traffic to/from the tunneled subnet
 leftfirewall=yes
 authby=psk
 # bring the tunnel up when the daemon starts
 auto=start
 # aes256/sha512/modp1024 corresponds to aes-256/sha512/pfs group2 in OpenStack
 ike=aes256-sha512-modp1024
 esp=aes256-sha512
 # right = OpenStack VPN service external IP and remote subnet
 right=217.50.60.70
 rightsubnet=10.0.1.0/24
 rightauth=psk
 ikelifetime=3600s
 keylife=3600s
 type=tunnel

CLI

# show connection status (short / verbose)
sudo ipsec status
sudo ipsec statusall
# restart the daemon and re-read the configuration
sudo ipsec restart
 
# bring the vpn1 connection up / down manually
sudo ipsec up vpn1
sudo ipsec down vpn1
 
# list the IKE/ESP algorithms this build supports
sudo ipsec listalgs

List

OpenStack: output the VM list, including project and domain, as JSON

# split command-substitution output on newlines (values may contain spaces)
IFS=$(echo -en "\n\b")
 
# iterate all projects as compact one-line JSON objects
PROJECTS_JSON=$(openstack project list --long -f json)
for PROJECT_JSON in $(echo "${PROJECTS_JSON}" | jq -c '.[]'); do
    PROJECT_ID=$(echo ${PROJECT_JSON} | jq -r .ID)
    PROJECT_NAME=$(echo ${PROJECT_JSON} | jq -r .Name)
    DOMAIN_ID=$(echo ${PROJECT_JSON} | jq -r '."Domain ID"')
    DOMAIN_JSON=$(openstack domain show  ${DOMAIN_ID} -f json)
    DOMAIN_NAME=$(echo ${DOMAIN_JSON} | jq -r .name)
 
    # list this project's servers; NOTE: snippet is truncated here
    # (the jq filter continuation and the loop's "done" are missing)
    openstack server list --all-projects --long --project ${PROJECT_ID} --sort-column Name -f json | jq .[] | \

OpenStack Debug VPN connection

# project whose VPN connection is being debugged
PROJECT_ID=9eaecf3b-0972-4166-806a-295f4e69fd3c
 
# find the router that backs this project's VPN service
ROUTER_ID=$(openstack vpn service list --long -f json | jq -r ".[] | select(.Project == \"${PROJECT_ID}\").Router")
echo ${ROUTER_ID}
 
# which network node(s) host the (HA-replicated) router interface?
openstack port list --router ${ROUTER_ID} --device-owner network:ha_router_replicated_interface -c binding_host_id  -f value | sort -u
 
# enter the neutron L3 agent container on that node
CONTROL_NODE=ewos1-ctl1-prod
ssh -t ${CONTROL_NODE} docker exec -u root -ti neutron_l3_agent bash
 
apt update
apt install -y vim
# enable verbose charon file logging for this router's strongSwan instance
vi /var/lib/neutron/ipsec/${ROUTER_ID}/etc/strongswan.d/charon-logging.conf
 
charon {
    ...
    filelog {
        /var/log/vpn-debug-${ROUTER_ID}.log {
            append = no
            default = 2
            ike_name = yes
            time_add_ms = yes
            time_format = %b %e %T
        }
    }
    ...
}
 
 
# run the VPN wrapper inside the router's network namespace
# NOTE: command is truncated here (continuation line missing)
ip netns exec qrouter-${ROUTER_ID} neutron-vpn-netns-wrapper \

Get noisy neighbor VMs

# load admin credentials for the OpenStack CLI
source /etc/kolla/admin-openrc.sh
 
# split command-substitution output on newlines (values may contain spaces)
IFS=$(echo -en "\n\b")
 
#######################################
# Enrich one input line with server and project names.
# Arguments: $1 - whitespace-separated line whose 3rd field is a server ID
# Outputs:   the input line followed by server name and project name
# Note: variables stay global on purpose — the (truncated) caller may read them.
#######################################
function get_vm_details() {
    LINE=$1
 
    SERVER_ID=$(cut -d" " -f3 <<<"${LINE}")
    SERVER_JSON=$(openstack server show "${SERVER_ID}" -f json)
    SERVER_NAME=$(jq -r .name <<<"${SERVER_JSON}")
    SERVER_PROJECT_ID=$(jq -r .project_id <<<"${SERVER_JSON}")
    SERVER_PROJECT_JSON=$(openstack project show "${SERVER_PROJECT_ID}" -f json)
    SERVER_PROJECT_NAME=$(jq -r .name <<<"${SERVER_PROJECT_JSON}")
 
    echo "${LINE} ${SERVER_NAME} ${SERVER_PROJECT_NAME}"
}
 

Check OpenvSwitch

#!/bin/bash
# Check every compute node for stale qvo port entries in the OpenvSwitch DB.
 
export OS_ENV="@globals.environment@"
 
 
# dev workstations run the CLI from pyenv
if [ "${OS_ENV}" == "dev" ]; then
    export PYENV_ROOT="$HOME/.pyenv"
    export PATH="$PYENV_ROOT/bin:$PATH"
    eval "$(pyenv init -)"
fi
 
source /etc/kolla/admin-openrc.sh
 
EXIT_CODE=0
 
# search for broken ovs entry in DB
for NODE in $(openstack compute service list -c Host -f value | sort -u); do
    OUTPUT=$(ssh ${NODE} docker exec openvswitch_vswitchd ovsdb-client dump | grep qvo | egrep -v "tag|mac" | cut -d "\"" -f2)
    for PORT in ${OUTPUT}; do
        printf "%-20s %s\n" "${NODE}" "${PORT}"
 
        # NOTE: script is truncated here (loops not closed, EXIT_CODE not yet exited with)
        EXIT_CODE=1

Check server

#!/bin/bash
# Report servers in ERROR / VERIFY_RESIZE state and find qemu processes
# running on an unexpected compute node.
 
source /etc/kolla/admin-openrc.sh
 
EXIT_CODE=0
 
# search for server with status ERROR
OUTPUT="$(openstack server list --all --status=ERROR -c ID -c Name -c Status -f value)"
#openstack server show ${SERVER_ID} -c fault -f value
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"
 
    EXIT_CODE=1
fi
 
# search for server with status VERIFY_RESIZE
OUTPUT="$(openstack server list --all --status=VERIFY_RESIZE -c ID -c Name -c Status -f value)"
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"
 
    EXIT_CODE=1
fi
 
# search for server processes on wrong compute node
# (extracts the instance UUID from each qemu command line;
#  NOTE: script is truncated here — loop bodies and "done" are missing)
for COMPUTE_NODE in $(openstack compute service list --service nova-compute -c Host -f value); do
    for SERVER_ID in $(ssh ${COMPUTE_NODE} pgrep qemu -a | grep -o -P '(?<=-uuid ).*(?= -smbios)'); do

OpenStack: placement (resource provider)

Install CLI client
https://docs.openstack.org/releasenotes/osc-placement/

# install the OSC plugin that provides "openstack resource provider ..."
# NOTE: the CLI plugin on PyPI is "osc-placement" (see the release notes link
# above); "openstack-placement" is the placement API service itself
pip install osc-placement
 
echo 3.5.6 > .python-version

resource provider allocation

# list resource providers (one per hypervisor)
openstack resource provider list --sort-column name
 
# show a server's current resource allocations
openstack resource provider allocation show ${SERVER_ID}
 
# delete a server's allocations (e.g. orphaned after a failed migration)
openstack resource provider allocation delete ${SERVER_ID}

Check multiple / broken resource provider allocation