LXD container
Create default container
Define node variables
# Identity of the bare-metal node to enroll: hostname, BMC endpoint, and
# the MAC address of its first provisioning NIC.
NODE=com4-dev
NODE_BMC_HOST=com4-dev.ipmi.dev.i.example.com
NODE_MAC_NIC1=00:11:22:33:44:55
Define env variables
# BMC login credentials (factory defaults shown; replace in real deployments).
NODE_BMC_USER=ADMIN
NODE_BMC_PASS=ADMIN
Create new node with Redfish (PXE device boot broken)
https://docs.openstack.org/ironic/latest/admin/drivers/redfish.html
# Enroll the node in Ironic using the Redfish hardware driver.
# The original was collapsed onto one line, turning each "\ " into a literal
# escaped-space argument; restored as real line continuations. Expansions are
# quoted so credentials with special characters survive word splitting.
openstack baremetal node create \
  --name "${NODE}" \
  --driver redfish \
  --driver-info redfish_address="https://${NODE_BMC_HOST}" \
  --driver-info redfish_username="${NODE_BMC_USER}" \
  --driver-info redfish_password="${NODE_BMC_PASS}" \
  --driver-info redfish_verify_ca=false \
  --driver-info redfish_system_id=/redfish/v1/Systems/1
Create new node with IPMI
https://docs.openstack.org/ironic/latest/admin/drivers/ipmitool.html
# Enroll the node in Ironic using the IPMI hardware driver.
# Restored from a collapsed one-liner (literal "\ " sequences) into proper
# line continuations; expansions quoted to protect special characters.
openstack baremetal node create \
  --name "${NODE}" \
  --driver ipmi \
  --driver-info ipmi_address="${NODE_BMC_HOST}" \
  --driver-info ipmi_username="${NODE_BMC_USER}" \
  --driver-info ipmi_password="${NODE_BMC_PASS}"
iPXE
https://docs.openstack.org/ironic/latest/admin/interfaces/boot.html#pxe-boot
# Intel Ethernet Connections Boot Utility, Preboot images and EFI drivers.
# The original was fused into a single wget invocation; split into the five
# separate commands: download, unpack, enter the tool directory, run BootUtil.
wget https://downloadmirror.intel.com/29137/eng/Preboot.tar.gz
tar xzf Preboot.tar.gz -C /tmp
cd /tmp/APPS/BootUtil/Linux_x64
chmod +x bootutil64e
./bootutil64e
Ansible galaxy
ansible-galaxy install
https://galaxy.ansible.com/bennojoy/network_interface/ - Network configuration
https://github.com/Oefenweb/ansible-postfix
https://galaxy.ansible.com/geerlingguy/gitlab/
Linux software RAID (mdadm)
https://galaxy.ansible.com/mrlesmithjr/mdadm
# Install the mdadm (Linux software RAID) role from Ansible Galaxy.
ansible-galaxy install mrlesmithjr.mdadm
# Multipass quick reference. The original was collapsed onto one line whose
# leading "#" commented out every command; restored as separate commands.
# install multipass
sudo snap install multipass --edge --classic
snap info multipass
multipass version
# create VM
multipass launch --name vm1
multipass launch --name vm5 --disk 4G --mem 256M core18
multipass exec vm3 -- lsb_release -a
multipass mount /tmp/mp vm3
multipass transfer /etc/fstab vm3:/tmp/y
multipass exec vm3 -- sudo apt update && sudo apt dist-upgrade -y
SSH login to VM
# SSH into a Multipass VM as user 'ubuntu' using the private key that the
# multipass daemon generates. Replace <VM_IP> with the VM's IP address.
sudo ssh ubuntu@<VM_IP> -i /var/snap/multipass/common/data/multipassd/ssh-keys/id_rsa
# VM removal and driver selection; the original fused three commands into
# one invalid command line.
multipass delete vm2
# purge permanently removes deleted instances and reclaims disk space
multipass purge
# switch the local hypervisor backend to libvirt
sudo multipass set local.driver=libvirt
Links
https://multipass.run/
https://multipass.run/docs/launch-command
#!/bin/bash
# Scan every compute node for leftover "qvo" port entries in the local
# Open vSwitch database and print them; exits non-zero if any are found.
# NOTE(review): the source was collapsed onto one line and truncated after
# "EXIT_CODE=1"; the loop terminators and final exit were reconstructed.
export OS_ENV="@globals.environment@"
if [ "${OS_ENV}" == "dev" ]; then
  export PYENV_ROOT="$HOME/.pyenv"
  export PATH="$PYENV_ROOT/bin:$PATH"
  eval "$(pyenv init -)"
fi
source /etc/kolla/admin-openrc.sh
EXIT_CODE=0
# search for broken ovs entry in DB
for NODE in $(openstack compute service list -c Host -f value | sort -u); do
  OUTPUT=$(ssh "${NODE}" docker exec openvswitch_vswitchd ovsdb-client dump | grep qvo | egrep -v "tag|mac" | cut -d "\"" -f2)
  for PORT in ${OUTPUT}; do
    printf "%-20s %s\n" "${NODE}" "${PORT}"
    EXIT_CODE=1
  done
done
exit ${EXIT_CODE}
# NOTE(review): this script was collapsed onto a single line (the leading
# "#!" makes the whole line a comment here) and is truncated mid inner
# for-loop — the body that compares each qemu process's server UUID against
# its expected compute node is missing. Intent per the inline comments:
# report servers in ERROR state, servers stuck in VERIFY_RESIZE, and qemu
# processes running on the wrong compute node; exit 1 if anything is found.
# Recover the full script from its origin before using.
#!/bin/bash source /etc/kolla/admin-openrc.sh EXIT_CODE=0 # search for server with status ERROR OUTPUT="$(openstack server list --all --status=ERROR -c ID -c Name -c Status -f value)" #openstack server show ${SERVER_ID} -c fault -f value if [ -n "${OUTPUT}" ]; then echo "${OUTPUT}" EXIT_CODE=1 fi # search for server with status VERIFY_RESIZE OUTPUT="$(openstack server list --all --status=VERIFY_RESIZE -c ID -c Name -c Status -f value)" if [ -n "${OUTPUT}" ]; then echo "${OUTPUT}" EXIT_CODE=1 fi # search for server processes on wrong compute node for COMPUTE_NODE in $(openstack compute service list --service nova-compute -c Host -f value); do for SERVER_ID in $(ssh ${COMPUTE_NODE} pgrep qemu -a | grep -o -P '(?<=-uuid ).*(?= -smbios)'); do
Install CLI client
https://docs.openstack.org/releasenotes/osc-placement/
# Install the osc-placement CLI plugin and pin the pyenv Python version.
# The original fused both commands into one pip invocation.
pip install openstack-placement
echo 3.5.6 > .python-version
resource provider allocation
# Placement resource-provider allocation commands. The original collapsed
# line started with "#", commenting out every command; restored below.
# list hypervisor
openstack resource provider list --sort-column name
# show
openstack resource provider allocation show "${SERVER_ID}"
# delete
openstack resource provider allocation delete "${SERVER_ID}"
Check multiple / broken resource provider allocation
OpenStack admin
# Mark the image so Nova attaches a QEMU guest-agent virtio channel to
# servers booted from it.
openstack image set --property hw_qemu_guest_agent=yes ${IMAGE_ID}
Within the VM
# Inside the VM: verify the guest-agent channel exists, then install the
# agent. The original collapsed line's leading "#" commented everything
# out; also fixes the "whather" typo.
# check whether hw_qemu_guest_agent is enabled
ls -l /dev/virtio-ports/org.qemu.guest_agent.0
# install qemu-guest-agent
sudo apt-get install -y qemu-guest-agent
supported_commands
# Query the guest agent of a libvirt domain for its info (including the
# list of supported commands) via the nova_libvirt container.
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-info"}'
Execute command
# Look up the hypervisor host and libvirt instance name for a server, then
# ask its guest agent for the in-guest network interfaces. The original
# fused both commands into one invalid line.
openstack server show d82ca1de-1fcd-4ca6-84db-84891ec37796 -c OS-EXT-SRV-ATTR:hypervisor_hostname -c OS-EXT-SRV-ATTR:instance_name
docker exec -ti nova_libvirt virsh qemu-agent-command instance-000069d9 '{"execute":"guest-network-get-interfaces"}'
Links
https://www.sebastien-han.fr/blog/2015/02/09/openstack-perform-consistent-snapshots-with-qemu-guest-agent/
https://www.ovh.com/blog/create-and-use-openstack-snapshots/
http://wiki.stoney-cloud.org/wiki/Qemu_Guest_Agent_Integration
Install crudini
# Install crudini inside the neutron_l3_agent container (two docker execs
# were fused into one line in the original).
docker exec -ti -u root neutron_l3_agent apt update
docker exec -ti -u root neutron_l3_agent apt install -y crudini
Create configuration
# Build a merged debug config for neutron-debug. The first command opens a
# shell inside the container; the following commands are run in that shell.
# (The original fused all four commands into one invalid line.)
docker exec -ti neutron_l3_agent bash
umask 077
cat /etc/neutron/neutron.conf > /etc/neutron/debug.ini
crudini --merge /etc/neutron/debug.ini < /etc/neutron/l3_agent.ini
Export credentials
# Disable shell history before pasting credentials so the OS_* exports are
# not persisted to disk. In the original collapsed line the first "#"
# commented out everything including the following hints.
unset HISTFILE
# cat /etc/kolla/admin-openrc.sh
# paste export OS_XXX
Get network ID
# Resolve the network ID of a server's port. The collapsed original made
# SERVER_ID/PORT_ID environment-prefix assignments of the final command, so
# ${PORT_ID} expanded before being assigned (always empty) — a real bug.
SERVER_ID=074e2a72-9bd7-488f-af3d-f45f3bc0b6e7
PORT_ID=$(openstack port list --server "${SERVER_ID}" -c id -f value)
openstack port show "${PORT_ID}" -c network_id -f value
Create probe
# Create a diagnostic probe port on the target network using the merged
# debug configuration built above.
neutron-debug --config-file /etc/neutron/debug.ini probe-create ${NETWORK_ID}
Get probe port ID