Lego: Let’s Encrypt client and ACME library

Get the latest release (v3.2.0 at the time of writing)

wget https://github.com/go-acme/lego/releases/download/v3.2.0/lego_v3.2.0_linux_amd64.tar.gz -qO- | tar -C /tmp -xz lego
# stop service on port 80 / 443
service nginx stop
service apache2 stop
 
# Request certificate
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --http run
 
# Request wildcard certificate
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --domains="*.bar.com" --dns manual run
 
# restart service on port 80 / 443
service nginx start
service apache2 start

Show certificates

ls -l ~/.lego/certificates/
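lego usually stores the files as bar.com.crt, bar.com.key, bar.com.issuer.crt and bar.com.json. A minimal renewal sketch with the same flags as the initial request (lego only renews when the certificate is close to expiry):

# stop the service on port 80 again before renewing via the HTTP challenge
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --http renew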

OpenStack Designate

# test with designate DNS provider
/tmp/lego --accept-tos --email="foo@bar.com" --domains="*.bar.com" --dns designate run
... designate: some credentials information are missing: OS_AUTH_URL,OS_USERNAME,OS_PASSWORD,OS_TENANT_NAME,OS_REGION_NAME

Request wildcard certificate
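A minimal sketch, assuming the OpenStack credentials listed in the error above are exported for the Designate provider (all values are placeholders):

export OS_AUTH_URL=https://keystone.example.com:5000/v3
export OS_USERNAME=foo
export OS_PASSWORD=secret
export OS_TENANT_NAME=foo-project
export OS_REGION_NAME=RegionOne
 
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --domains="*.bar.com" --dns designate run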

LXD: storage

Configure default storage

# create a ZFS-backed storage pool from an existing dataset
lxc storage create zfs zfs source=rpool/lxd
# use the new pool for the default profile's root disk
lxc profile device add default root disk path=/ pool=zfs
lxc storage list
lxc storage delete default
 
# zfs
lxc storage create zfs zfs source=tank/lxd
lxc storage list
 
# delete default storage
lxc storage volume list default
lxc storage volume delete default image/7d788819a5a97433db8470ee68370ec69e829b429800fa28b5524f0411490ce9
lxc storage delete default
 
# move container to another storage pool (the container must be stopped)
CONTAINER=www1
lxc stop ${CONTAINER}
lxc move ${CONTAINER} ${CONTAINER}-tmp -s nvme
lxc move ${CONTAINER}-tmp ${CONTAINER}
lxc start ${CONTAINER}
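Verify which pool the container root disk is on now (sketch):

lxc config show ${CONTAINER} --expanded | grep pool
lxc storage volume list nvme | grep ${CONTAINER}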

Configure ZFS storage

lxc profile device del dev-zfs root
lxc profile device add dev-zfs root disk path=/ pool=zfs

Storage

# change container storage quota
lxc config device set <CONTAINER_NAME> root size 100GB
 
# lvm thin pool
lvcreate -L 250G --thinpool kvm system
lxc storage create kvm lvm source=system lvm.thinpool_name=kvm
 
# unix-block
lxc config device add c1 xvdb1 unix-block source=/dev/xvdb1 required=false
lxc config device remove gitlab-runner3-dev xvdb2

Container root
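A sketch for locating a container's root filesystem on the host; the exact path depends on the LXD packaging and pool name (deb package layout shown, snap installs live under /var/snap/lxd/common/lxd/ instead):

CONTAINER=www1
# ZFS dataset and mountpoint backing the container
zfs list -o name,mountpoint | grep ${CONTAINER}
ls /var/lib/lxd/storage-pools/zfs/containers/${CONTAINER}/rootfs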

GitLab: Docker CI pipeline

Optional: Create nested LXD container
http://www.panticz.de/lxd/nesting

CONTAINER_NAME=gitlab-runner1-dev
lxc launch ubuntu:18.04 ${CONTAINER_NAME} -p disk-zfs -p nic-dev-mgmt -c boot.autostart=true -c security.nesting=true -c security.privileged=true
#-c volatile.dev-mgmt.hwaddr=00:11:22:33:44:55
 
lxc exec ${CONTAINER_NAME} -- apt update
lxc exec ${CONTAINER_NAME} -- apt dist-upgrade
lxc exec ${CONTAINER_NAME} -- apt purge -y lxd lxd-client snapd unattended-upgrades
lxc exec ${CONTAINER_NAME} -- apt autoremove
 
lxc file push /root/.ssh/authorized_keys ${CONTAINER_NAME}/root/.ssh/authorized_keys
lxc exec ${CONTAINER_NAME} -- bash -c "sed -i 's/eth0:/dev-mgmt:/g' /etc/netplan/50-cloud-init.yaml"
lxc exec ${CONTAINER_NAME} -- netplan apply
 
# relax LXC confinement so Docker can run inside the container
printf 'lxc.apparmor.profile = unconfined\nlxc.cgroup.devices.allow = a\nlxc.mount.auto=proc:rw sys:rw\nlxc.cap.drop=' | lxc config set ${CONTAINER_NAME} raw.lxc -
lxc restart ${CONTAINER_NAME}

Install Docker inside LXD container
# http://www.panticz.de/install-docker
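A quick sketch using the docker.io package from the Ubuntu archive (the linked page covers the docker.com repository setup):

lxc exec ${CONTAINER_NAME} -- apt install -y docker.io
lxc exec ${CONTAINER_NAME} -- docker run --rm hello-world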

FIX: While synchronizing instance power states, found XX instances in the database and XX instances on the hypervisor

Check the Kibana logs for:
"While synchronizing instance power states, found"

Check libvirt VMs vs. nova DB

COMPUTE_NODE=com1-dev
 
# get shutoff VMs on compute node
VMS_COMPUTE=$(ssh ${COMPUTE_NODE} docker exec -i nova_libvirt virsh list --state-shutoff --uuid | sed '/^$/d' | sort)
# echo "${VMS_COMPUTE}"
 
# get shutoff VMs from nova DB
VMS_NOVA=$(ssh os-admin-dev "source /etc/kolla/admin-openrc.sh; openstack server list --all --host ${COMPUTE_NODE} -c ID -f value --status SHUTOFF" | sort)
# echo "${VMS_NOVA}"
 
# diff shutoff VMs
comm -3 <(echo "${VMS_COMPUTE}") <(echo "${VMS_NOVA}")

Remove shutdown VM from libvirt
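A sketch for removing such a stale domain from libvirt on the compute node; the UUID placeholder is one of the IDs from the diff above that only exists in libvirt:

VM_UUID=<UUID_ONLY_IN_LIBVIRT>
ssh ${COMPUTE_NODE} docker exec -i nova_libvirt virsh undefine ${VM_UUID}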

Open vSwitch: cleanup interfaces without tap device

Show bridges without a tap device

brctl show | egrep "qvb|tap" | sed '$!N;/\n.*tap/d;P;D' | awk '{print substr($1,4,8)}'

Get interface details

# interface ID fragment taken from the output above
TOKEN=cac559da
 
# show port details
docker exec openvswitch_vswitchd ovsdb-client dump | grep ${TOKEN}
 
# get OVS port
docker exec openvswitch_vswitchd ovs-vsctl list-ports br-int | grep ${TOKEN}
 
# get OVS interface
docker exec openvswitch_vswitchd ovs-vsctl list-ifaces br-int | grep ${TOKEN}
 
# show host bridges
brctl show | grep ${TOKEN}
 
# show host interfaces
ip a | grep ${TOKEN}

Delete interface
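A sketch for removing the leftover devices for ${TOKEN}; the -xx suffixes are placeholders for the real names shown by the commands above:

# remove the OVS port from the integration bridge
docker exec openvswitch_vswitchd ovs-vsctl del-port br-int qvo${TOKEN}-xx
 
# remove the veth pair and the linux bridge on the host
ip link delete qvb${TOKEN}-xx
ip link set qbr${TOKEN}-xx down
brctl delbr qbr${TOKEN}-xx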

OpenStack: project

Create project

openstack project create foo-project --domain bar
 
ROLES="
creator
heat_stack_owner
load-balancer_member
member
"
 
# assign roles to the user in the new project (set USER_ID and PROJECT_ID first)
for ROLE in ${ROLES}; do
    openstack role add --user ${USER_ID} --project ${PROJECT_ID} ${ROLE}
done
 
openstack role assignment list --names --user ${USER_ID}