openstack

OpenStack: Output VM list with project and domain as JSON file

# Split only on newline/backspace so whole JSON objects and multi-word
# values survive the unquoted expansions in the loop below.
IFS=$(echo -en "\n\b")

# All projects (the --long output includes the Domain ID column) as JSON.
PROJECTS_JSON=$(openstack project list --long -f json)
# Iterate one compact JSON object per project (jq -c = one object per line).
for PROJECT_JSON in $(echo "${PROJECTS_JSON}" | jq -c '.[]'); do
    PROJECT_ID=$(echo ${PROJECT_JSON} | jq -r .ID)
    PROJECT_NAME=$(echo ${PROJECT_JSON} | jq -r .Name)
    PROJECT_ID=$(echo ${PROJECT_JSON} | jq -r .ID)
    DOMAIN_ID=$(echo ${PROJECT_JSON} | jq -r '."Domain ID"')
    # Resolve the human-readable name of the project's domain.
    DOMAIN_JSON=$(openstack domain show  ${DOMAIN_ID} -f json)
    DOMAIN_NAME=$(echo ${DOMAIN_JSON} | jq -r .name)

    # List this project's servers sorted by name.
    # NOTE(review): snippet is truncated here — the pipeline and the loop
    # body continue beyond this excerpt.
    openstack server list --all-projects --long --project ${PROJECT_ID} --sort-column Name -f json | jq .[] | \

Get noisy neighbor VMs

# Load admin credentials for the OpenStack CLI.
source /etc/kolla/admin-openrc.sh

# Split only on newline/backspace so multi-word fields survive the
# unquoted expansions used by this script.
IFS=$(echo -en "\n\b")
 
#######################################
# Append the server name and owning project name to a server-list line.
# Arguments: $1 - text line whose 3rd space-separated field is a server ID
# Outputs:   the original line followed by server name and project name
#######################################
function get_vm_details() {
    # Keep everything local so the helper does not leak variables
    # into the calling shell.
    local LINE=$1
    local SERVER_ID SERVER_JSON SERVER_NAME
    local SERVER_PROJECT_ID SERVER_PROJECT_JSON SERVER_PROJECT_NAME

    # The 3rd space-separated column holds the server UUID.
    SERVER_ID=$(echo "${LINE}" | cut -d" " -f3)

    # Resolve server details, then the project that owns the server.
    # All expansions are quoted so IDs/JSON survive word splitting.
    SERVER_JSON=$(openstack server show "${SERVER_ID}" -f json)
    SERVER_NAME=$(echo "${SERVER_JSON}" | jq -r .name)
    SERVER_PROJECT_ID=$(echo "${SERVER_JSON}" | jq -r .project_id)
    SERVER_PROJECT_JSON=$(openstack project show "${SERVER_PROJECT_ID}" -f json)
    SERVER_PROJECT_NAME=$(echo "${SERVER_PROJECT_JSON}" | jq -r .name)

    echo "${LINE} ${SERVER_NAME} ${SERVER_PROJECT_NAME}"
}
 

Check OpenvSwitch

#!/bin/bash

# Deployment environment, substituted by the templating step at deploy time.
export OS_ENV="@globals.environment@"

# On dev machines, activate pyenv so the expected Python toolchain is used.
if [[ "${OS_ENV}" == "dev" ]]; then
    export PYENV_ROOT="${HOME}/.pyenv"
    export PATH="${PYENV_ROOT}/bin:${PATH}"
    eval "$(pyenv init -)"
fi
 
# Load admin credentials for the OpenStack CLI.
source /etc/kolla/admin-openrc.sh

EXIT_CODE=0

# search for broken ovs entry in DB
for NODE in $(openstack compute service list -c Host -f value | sort -u); do
    # Dump the OVS DB on each compute node; keep qvo port names from rows
    # that have neither a tag nor a mac entry.
    OUTPUT=$(ssh ${NODE} docker exec openvswitch_vswitchd ovsdb-client dump | grep qvo | egrep -v "tag|mac" | cut -d "\"" -f2)
    for PORT in ${OUTPUT}; do
        printf "%-20s %s\n" "${NODE}" "${PORT}"

        EXIT_CODE=1
        # NOTE(review): snippet is truncated here — loop closers and the
        # final exit continue beyond this excerpt.
Check server

#!/bin/bash

# Load admin credentials for the OpenStack CLI.
source /etc/kolla/admin-openrc.sh

EXIT_CODE=0

# Print every server (across all projects) that is in the given status
# and flip EXIT_CODE to 1 when any are found.
check_servers_in_status() {
    local STATUS=$1
    local OUTPUT

    OUTPUT="$(openstack server list --all --status="${STATUS}" -c ID -c Name -c Status -f value)"
    #openstack server show ${SERVER_ID} -c fault -f value
    if [[ -n "${OUTPUT}" ]]; then
        echo "${OUTPUT}"

        EXIT_CODE=1
    fi
}

# search for server with status ERROR
check_servers_in_status ERROR

# search for server with status VERIFY_RESIZE
check_servers_in_status VERIFY_RESIZE
 
# search for server processes on wrong compute node
for COMPUTE_NODE in $(openstack compute service list --service nova-compute -c Host -f value); do
    # UUIDs of VMs actually running on this node, parsed from each qemu
    # process command line (text between "-uuid " and " -smbios").
    for SERVER_ID in $(ssh ${COMPUTE_NODE} pgrep qemu -a | grep -o -P '(?<=-uuid ).*(?= -smbios)'); do
        # NOTE(review): snippet is truncated here — the comparison against
        # the expected host continues beyond this excerpt.

OpenStack: placement (resource provider)

Install CLI client
https://docs.openstack.org/releasenotes/osc-placement/

pip install openstack-placement

# pin the Python version for this directory (read by pyenv)
echo 3.5.6 > .python-version

resource provider allocation

# resource provider of this hypervisor, looked up by its FQDN
RESOURCE_PROVIDER_ID=$(openstack resource provider list --name ${NODE_NAME}.$(hostname -d) -c uuid -f value)
openstack resource provider show --allocations ${RESOURCE_PROVIDER_ID} -c allocations -f json


# list hypervisor
openstack resource provider list --sort-column name

# show the allocations consumed by a single server
openstack resource provider allocation show ${SERVER_ID}



NODE_NAME=com1-prod.example.com
RESOURCE_PROVIDER_UUID=$(openstack resource provider list --name ${NODE_NAME} -c uuid -f value)
# inventory (capacity) and current usage of the provider
openstack resource provider inventory list ${RESOURCE_PROVIDER_UUID}
openstack resource provider usage show ${RESOURCE_PROVIDER_UUID}


# hosts (in the given aggregate) that could satisfy this resource request
openstack allocation candidate list --resource VCPU=32  --resource DISK_GB=120 --resource MEMORY_MB=4096 --member-of 3f0d0e40-6cf4-422d-a245-ceaffb0ac037 

Add resource provider to aggregate

qemu guest agent

OpenStack admin

# enable the QEMU guest agent image property (hw_qemu_guest_agent)
openstack image set --property hw_qemu_guest_agent=yes ${IMAGE_ID}

Within the VM

# check whether hw_qemu_guest_agent is enabled (virtio channel present)
ls -l /dev/virtio-ports/org.qemu.guest_agent.0

# install qemu-guest-agent
sudo apt-get install -y qemu-guest-agent

supported_commands

# query the guest agent for its info (includes the supported commands)
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-info"}'

Execute command

# find the hypervisor and libvirt instance name of the server, then run a
# guest-agent command against that instance
openstack server show d82ca1de-1fcd-4ca6-84db-84891ec37796 -c OS-EXT-SRV-ATTR:hypervisor_hostname -c OS-EXT-SRV-ATTR:instance_name
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-network-get-interfaces"}'

Links
https://www.sebastien-han.fr/blog/2015/02/09/openstack-perform-consistent-snapshots-with-qemu-guest-agent/
https://www.ovh.com/blog/create-and-use-openstack-snapshots/
http://wiki.stoney-cloud.org/wiki/Qemu_Guest_Agent_Integration

Create neutron probe

Install crudini

# install crudini inside the neutron_l3_agent container (as root)
docker exec -ti -u root neutron_l3_agent apt update
docker exec -ti -u root neutron_l3_agent apt install -y crudini

Create configuration

# inside the container: build a debug config by merging l3_agent.ini into
# a copy of neutron.conf
docker exec -ti neutron_l3_agent bash
# keep the new file readable by the current user only
umask 077
cat /etc/neutron/neutron.conf > /etc/neutron/debug.ini
crudini --merge /etc/neutron/debug.ini < /etc/neutron/l3_agent.ini

Export credentials

# keep the credentials out of the shell history
unset HISTFILE
# cat /etc/kolla/admin-openrc.sh
# paste the export OS_XXX lines from it into this shell

Get network ID

# server whose network we want to probe
SERVER_ID=074e2a72-9bd7-488f-af3d-f45f3bc0b6e7

# resolve the server's port, then the network the port is attached to
PORT_ID=$(openstack port list --server ${SERVER_ID} -c id -f value)
openstack port show ${PORT_ID} -c network_id -f value

Create probe

# create a probe port on the network using the merged debug config
neutron-debug --config-file /etc/neutron/debug.ini probe-create ${NETWORK_ID}

Get probe port ID

Barbican (Secret)

# list all secrets
openstack secret list

# download a secret payload to a local file
# (line continuations added: the options belong to the same command)
openstack secret get https://barbican.service.example.com/v1/secrets/d5794ec0-a86f-420f-8d03-b1b11b4251bd \
  --payload_content_type application/octet-stream \
  --file /tmp/file1.out

# Create secret / certificate
openstack secret store --name=cert1 -t "application/octet-stream" -e base64 --payload="$(base64 < cert1.p12)"

ACL
https://docs.openstack.org/python-barbicanclient/latest/cli/cli_usage.html

# list the users allowed to access the secret
openstack acl get https://barbican.service.example.com/v1/secrets/1111111-2222-3333-4444-5555555555555

# allow access for user to secret
openstack acl user add -u ${USER_ID} https://barbican.service.example.com/v1/secrets/1111111-2222-3333-4444-5555555555555

Test
https://docs.citrix.com/en-us/citrix-application-delivery-management-software/13/orchestration/integrate-with-openstack-platform/openstack-configuring-lbaasv2-using-command-line.html