Allow (temporary) incoming ping to VM (ICMP traffic)

# Temporarily allow incoming ICMP (ping) to a VM by adding a security
# group rule, then remove the rule again.
SERVER_ID=51f8bbe2-4a89-4065-a24f-4a6fa47fadd0

# A server may have several ports; take the first one so the following
# 'port show' gets a single ID.
PORT_ID=$(openstack port list --server "${SERVER_ID}" -c id -f value | head -n1)
echo "${PORT_ID}"

#DEP: SECURITY_GROUP_ID=$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r .security_group_ids |  tr ',' '\n' | head -1)
SECURITY_GROUP_ID=$(openstack port show "${PORT_ID}" -c security_group_ids -f json | jq -r 'first(.security_group_ids[])')
echo "${SECURITY_GROUP_ID}"
# openstack security group show ${SECURITY_GROUP_ID}

# create security group rule to allow incoming icmp traffic
SECURITY_GROUP_RULE_ID=$(openstack security group rule create --protocol icmp "${SECURITY_GROUP_ID}" -c id -f value)
echo "${SECURITY_GROUP_RULE_ID}"
openstack security group rule show "${SECURITY_GROUP_RULE_ID}"

# remove rule
openstack security group rule delete "${SECURITY_GROUP_RULE_ID}"

RabbitMQ

Connect to control node(s)

# Inspect and enter the RabbitMQ container on a control node.
# show container state
docker ps -a | grep rabbitmq
 
# Connect into RabbitMQ Docker container (interactive shell)
docker exec -it rabbitmq bash

RabbitMQ state

# Show status of the local RabbitMQ node
rabbitmqctl status
 
# Show cluster status (membership, running nodes, partitions)
rabbitmqctl cluster_status
 
# Show list_queues (queue names and message counts)
rabbitmqctl list_queues

CLI

# rabbitmqctl commands for managing cluster membership.
# Typical re-join sequence: stop_app -> reset -> join_cluster -> start_app.
# Start the App
rabbitmqctl start_app 
 
# Stop the App (required before reset / join_cluster)
rabbitmqctl stop_app
 
# Reset the App (removes this node's data and cluster membership)
rabbitmqctl reset
 
# Force reset (when a normal reset fails, e.g. cluster unreachable)
rabbitmqctl force_reset
 
# Join the cluster
rabbitmqctl join_cluster rabbit@ctl2-stage
 
# Start the App
rabbitmqctl start_app
 
# forget node (run on a remaining cluster member), then re-join it
rabbitmqctl forget_cluster_node rabbit@nodeX
rabbitmqctl join_cluster rabbit@nodeX
 
# cleanup / remove all data
# Run the rm INSIDE the container via 'bash -c'. The original two-step
# form (interactive 'docker exec' followed by a separate rm line) would
# execute the rm on the HOST when pasted as a script.
docker exec rabbitmq bash -c 'rm -r /var/lib/rabbitmq/*'
docker restart rabbitmq

Statistics

# Interactive, top-like statistics UI for the local RabbitMQ node
rabbitmq-diagnostics observer

OpenStack: port

Identify port by MAC

# Identify an OpenStack port by its MAC address, then inspect the
# related subnet and project. Expansions quoted per SC2086.
MAC=00:11:22:33:44:55
openstack port list --mac-address "${MAC}"

SUBNET_ID=b07b6b7a-dfb2-4b58-82cb-1568da8990b3
openstack subnet show "${SUBNET_ID}"

PROJECT_ID=701e329e-997d-4dfa-b0d0-27a51670ed2d
openstack project show "${PROJECT_ID}"

Add security group to port

# Attach an additional security group to a specific server port.
SERVER_ID=$(openstack server list --all-projects --name vm1-dev -c ID -f value)
openstack port list --server "${SERVER_ID}"
PORT_ID=97006537-07b1-4d37-9e2e-3bb71ad23087
openstack port set --security-group 2060fc87-a1bf-4cf5-a497-f6c4b45cffcd "${PORT_ID}"

OpenStack: Create certbot VM

Create VM

# Create the certbot VM from an Ubuntu 18.04 image.
openstack server create foo-certbot \
    --image "Ubuntu 18.04" \
    --flavor m1.small \
    --key-name foo-key \
    --network foo-net \
    --security-group foo-sec

Add floating IP

# Allocate a floating IP from the "public" pool and attach it to the VM.
FLOATING_IP=$(openstack floating ip create public -c floating_ip_address -f value)
echo "${FLOATING_IP}"
openstack server add floating ip foo-certbot "${FLOATING_IP}"

Install certbot

# Quote the remote command: without quotes, everything after '&&' runs
# on the LOCAL machine instead of on the VM.
ssh "ubuntu@${FLOATING_IP}" 'sudo apt update && sudo apt install -y certbot'

Create wildcard certificates

# Newline-separated list of domains to request wildcard certs for.
export DOMAINS="
dev.example.com
www.example.com
"
 
# Word splitting on ${DOMAINS} is intentional here (one domain per word).
for DOMAIN in ${DOMAINS}; do
    # Wildcard certificates require the dns-01 challenge and the ACME v2
    # endpoint; quote the -d argument instead of backslash-escaping '*'.
    sudo certbot certonly \
    --manual \
    --manual-public-ip-logging-ok \
    --register-unsafely-without-email \
    --agree-tos \
    --preferred-challenges dns-01 \
    --server https://acme-v02.api.letsencrypt.org/directory \
    -d "*.${DOMAIN}"
done

Debug

Elasticsearch: curator

Install
https://www.elastic.co/guide/en/elasticsearch/client/curator/5.x/apt-repository.html

# Add the Elastic GPG key and the curator APT repository, then install.
wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
# Use 'sudo tee': a plain '>' redirect is performed by the unprivileged
# shell and fails with "permission denied" under /etc/apt.
echo "deb [arch=amd64] https://packages.elastic.co/curator/5/debian stable main" | sudo tee /etc/apt/sources.list.d/curator.list
sudo apt-get update && sudo apt-get install elasticsearch-curator
 
# sudo apt install -y elasticsearch-curator
# pip install elasticsearch-curator

Show usage

# List all indices (health, name, size, doc count) to see what curator can clean up
curl elasticsearch.example.com:9200/_cat/indices

curator.yml

---
# Curator client configuration (curator.yml).
client:
  hosts:
    - elasticsearch.example.com
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False
 
logging:
  loglevel: INFO
  logfile:
  logformat: default
  # suppress noisy per-request logging from these modules
  blacklist: ['elasticsearch', 'urllib3']

delete_indices.yml

OpenStack: Designate (DNSaaS)

Configure kolla-ansible

# Enable Designate in kolla-ansible /etc/kolla/globals.yml.
# NOTE(review): the dns_interface and enable_horizon_designate lines are
# only uncommented here, not assigned a value — confirm the values are
# set elsewhere.
#echo 'designate_tag: "7.0.1.2"' >> /etc/kolla/globals.yml
sed -i 's/[#]dns_interface:/dns_interface:/g' /etc/kolla/globals.yml
sed -i 's/[#]enable_designate: .*/enable_designate: "yes"/g' /etc/kolla/globals.yml
sed -i 's/[#]enable_horizon_designate:/enable_horizon_designate:/g' /etc/kolla/globals.yml
sed -i 's/[#]designate_ns_record: .*/designate_ns_record: "pool.{{ os_environment }}.example.com"/g' /etc/kolla/globals.yml

Configure pool

mkdir -p /etc/kolla/config/foo/designate
 
# The YAML below is the content of the following file:
/etc/kolla/config/foo/designate/pools.yaml
- name: default-bind
  description: Default BIND9 Pool
  attributes: {}
  # NS records published for zones in this pool (note the trailing dot)
  ns_records:
    - hostname: ns1.dev.i.example.com.
      priority: 1
  # nameservers Designate polls to verify zone propagation
  nameservers:
    - host: 10.0.4.45
      port: 53
  targets:
    - type: bind9
      description: BIND9 Server 1
      # address BIND pulls zone transfers from (Designate mDNS)
      masters:
        - host: 10.0.4.135
          port: 5354
      # the BIND server itself, plus its rndc control channel
      options:
        host: 10.0.4.45
        port: 53
        rndc_host: 10.0.4.45
        rndc_port: 953
        rndc_key_file: /etc/designate/rndc.key

Deploy Designate container

Lego: Let’s Encrypt client and ACME library

Get latest version

# Download the lego release tarball and extract only the 'lego' binary to /tmp.
wget https://github.com/go-acme/lego/releases/download/v3.2.0/lego_v3.2.0_linux_amd64.tar.gz -qO- | tar -C /tmp -xz lego
# stop service on port 80 / 443 (lego's built-in HTTP challenge listener needs them)
service nginx stop
service apache2 stop
 
# Request certificate (http-01 challenge)
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --http run
 
# Request wildcard certificate (dns-01 challenge, manual DNS record)
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --domains="*.bar.com" --dns manual run
 
# restart service on port 80 / 443
service nginx start
service apache2 start

Show certificates

# 'll' is an interactive shell alias, not a command; use ls -la instead.
ls -la ~/.lego/certificates/

OpenStack Designate

# test with designate
/tmp/lego --accept-tos --email="foo@bar.com" --domains="*.bar.com" --dns designate run
# expected error when OpenStack credentials are not exported (the bare
# output line below must be a comment, otherwise it breaks the script):
# ... designate: some credentials information are missing: OS_AUTH_URL,OS_USERNAME,OS_PASSWORD,OS_TENANT_NAME,OS_REGION_NAME

Request wildcard certificate

LXD: storage

Configure default storage

# Create a ZFS-backed LXD storage pool on an existing dataset and make
# it the default root disk for new containers.
lxc storage create zfs zfs source=rpool/lxd
lxc profile device add default root disk path=/ pool=zfs
lxc storage list
lxc storage delete default
 
# zfs
lxc storage create zfs zfs source=tank/lxd
lxc storage list
 
# delete default storage (its volumes must be removed first)
lxc storage volume list default
lxc storage volume delete default image/7d788819a5a97433db8470ee68370ec69e829b429800fa28b5524f0411490ce9
lxc storage delete default
 
# move container to another storage via a temporary name
# NOTE(review): the container presumably has to be stopped first
# (the final 'lxc start' suggests so) — confirm
CONTAINER=www1
lxc move ${CONTAINER} ${CONTAINER}-tmp -s nvme
lxc move ${CONTAINER}-tmp ${CONTAINER}
lxc start ${CONTAINER}

Configure ZFS storage

# Replace the root disk device of the dev-zfs profile with the zfs pool.
lxc profile device del dev-zfs root
lxc profile device add dev-zfs root disk path=/ pool=zfs

Storage

# change container storage quota (replace <CONTAINER_NAME> placeholder)
lxc config device set <CONTAINER_NAME> root size 100GB
 
# lvm thin pool: 250G thin pool 'kvm' in volume group 'system'
lvcreate -L 250G --thinpool kvm system
lxc storage create kvm lvm source=system lvm.thinpool_name=kvm
 
# unix-block: pass a host block device into container c1
lxc config device add c1 xvdb1 unix-block source=/dev/xvdb1 required=false
lxc config device remove gitlab-runner3-dev xvdb2

Container root

GitLab: Docker CI pipeline

Optional: Create nested LXD container
http://www.panticz.de/lxd/nesting

# Launch a privileged, nesting-enabled LXD container for running
# Docker-based GitLab CI jobs.
CONTAINER_NAME=gitlab-runner1-dev
lxc launch ubuntu:18.04 ${CONTAINER_NAME} -p disk-zfs -p nic-dev-mgmt -c boot.autostart=true -c security.nesting=true -c security.privileged=true
#-c volatile.dev-mgmt.hwaddr=00:11:22:33:44:55
 
lxc exec ${CONTAINER_NAME} -- apt update
lxc exec ${CONTAINER_NAME} -- apt dist-upgrade
# remove snap/lxd and unattended-upgrades inside the CI container
lxc exec ${CONTAINER_NAME} -- apt purge -y lxd lxd-client snapd unattended-upgrades
lxc exec ${CONTAINER_NAME} -- apt autoremove
 
lxc file push /root/.ssh/authorized_keys ${CONTAINER_NAME}/root/.ssh/authorized_keys
# rename the netplan interface to match the nic-dev-mgmt profile device
lxc exec ${CONTAINER_NAME} -- bash -c "sed -i 's/eth0:/dev-mgmt:/g' /etc/netplan/50-cloud-init.yaml"
lxc exec ${CONTAINER_NAME} -- netplan apply
 
# relax LXC confinement (apparmor, cgroups, capabilities) so Docker can
# run inside the container
printf 'lxc.apparmor.profile = unconfined\nlxc.cgroup.devices.allow = a\nlxc.mount.auto=proc:rw sys:rw\nlxc.cap.drop=' | lxc config set ${CONTAINER_NAME} raw.lxc -
lxc restart ${CONTAINER_NAME}

Install Docker inside LXD container
# http://www.panticz.de/install-docker