OpenStack: Debug VM

#!/bin/bash
# Debug helper: dump everything known about one OpenStack VM
# (server, project, domain, console, migrations, ports, networks,
# security groups, HA routers and DHCP agents).

export SERVER_ID=@option.vm@

# If the parameter is not a 36-character UUID, treat it as a VM name
# and try to resolve it to a unique server ID.
if [ ${#SERVER_ID} -ne 36 ]; then
    RESULT="$(openstack server list --all --name "${SERVER_ID}")"
    # A single match renders as exactly 5 table lines:
    # 3 border/header lines + 1 data row + 1 closing border.
    if [ "$(echo "${RESULT}" | wc -l)" -eq 5 ]; then
        SERVER_ID=$(echo "${RESULT}" | tail -2 | head -1 | cut -d " " -f2)
    else
        # Zero or multiple matches: refuse to guess and fail the job
        # (non-zero exit so callers such as Rundeck report an error).
        echo "Found several VMs, please choose one from:"
        echo "${RESULT}"
        exit 1
    fi
fi

echo -e "VM:"
openstack server show -c name -c id -c addresses -c OS-EXT-SRV-ATTR:host -c status "${SERVER_ID}"

echo -e "\nProject:"
PROJECT_ID=$(openstack server show -c project_id -f value "${SERVER_ID}")
openstack project show -c id -c name -c description "${PROJECT_ID}"

echo -e "\nDomain:"
DOMAIN_ID=$(openstack project show -c domain_id -f value "${PROJECT_ID}")
openstack domain show -c id -c name -c description "${DOMAIN_ID}"

echo -e "\nServer:"
openstack server show "${SERVER_ID}"

echo -e "\nConsole:"
openstack console url show "${SERVER_ID}"

echo -e "\nMigration(s):"
nova migration-list --instance-uuid "${SERVER_ID}"

echo -e "\nVM Port(s):"
#nova interface-list ${SERVER_ID}
openstack port list --server "${SERVER_ID}" --long
# Word-splitting on the ID list below is intentional (one ID per line).
PORT_IDS=$(openstack port list --server "${SERVER_ID}" -c id -f value)

for PORT_ID in ${PORT_IDS}; do
    NETWORK_ID=$(openstack port show "${PORT_ID}" -c network_id -f value)
    NETWORK_NAME=$(openstack network show "${NETWORK_ID}" -c name -f value)

    echo -e "\n+++++ Start network ${NETWORK_NAME} +++++"

    echo -e "\nNetwork:"
    openstack network show "${NETWORK_ID}"

    echo -e "\nSubnet:"
    # A network may have more than one subnet; show each of them.
    SUBNET_IDS=$(openstack subnet list --network "${NETWORK_ID}" -c ID -f value)
    for SUBNET_ID in ${SUBNET_IDS}; do
        openstack subnet show "${SUBNET_ID}"
    done

    echo -e "\nNetwork ports:"
    openstack port list --network "${NETWORK_ID}"

    echo -e "\nSecurity group(s):"
    #SECURITY_GROUP_IDS="$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r .security_group_ids[])"
    # workaround for old OSC that renders security_group_ids as one comma-joined string
    SECURITY_GROUP_IDS="$(openstack port show "${PORT_ID}" -c security_group_ids -f json | jq -r .security_group_ids | tr ',' '\n')"
    for SECURITY_GROUP_ID in ${SECURITY_GROUP_IDS}; do
        openstack security group show "${SECURITY_GROUP_ID}"
    done

    echo -e "\nRouter:"
    ROUTER_DEVICE_ID=$(openstack port list --network "${NETWORK_ID}" --device-owner network:ha_router_replicated_interface -c device_id -f value)
    if [ -n "${ROUTER_DEVICE_ID}" ]; then
        ROUTER_HOSTS=$(openstack port list --device-id "${ROUTER_DEVICE_ID}" --device-owner network:router_ha_interface -c binding_host_id -f value --sort-column binding_host_id)
        for ROUTER_HOST in ${ROUTER_HOSTS}; do
            # Print the command for interactive use, then show the router
            # namespace's IPv4 interfaces non-interactively.
            echo "ssh -t ${ROUTER_HOST} ip netns exec qrouter-${ROUTER_DEVICE_ID} bash"
            ssh "${ROUTER_HOST}" ip netns exec "qrouter-${ROUTER_DEVICE_ID}" ip a | sed -n '/BROADCAST/,$p' | grep -E -v "inet6|valid_lft"
            echo
        done
    fi

    echo -e "\nDHCP/DNS:"
    DHCP_HOSTS=$(openstack port list --network "${NETWORK_ID}" --device-owner network:dhcp -c binding_host_id -f value --sort-column binding_host_id)
    for DHCP_HOST in ${DHCP_HOSTS}; do
        echo "ssh -t ${DHCP_HOST} ip netns exec qdhcp-${NETWORK_ID} bash"
        ssh "${DHCP_HOST}" ip netns exec "qdhcp-${NETWORK_ID}" ip a | sed -n '/BROADCAST/,$p' | grep -E -v "inet6|valid_lft"
        echo
    done

    echo "+++++ END network ${NETWORK_NAME} +++++"
done

Allow (temporary) incoming ping to VM (ICMP traffic)

SERVER_ID=51f8bbe2-4a89-4065-a24f-4a6fa47fadd0

# Look up the port attached to the VM.
# NOTE(review): a VM with several ports yields several IDs here — confirm
# the target VM has exactly one port before using this snippet.
PORT_ID=$(openstack port list --server "${SERVER_ID}" -c id -f value)
echo "${PORT_ID}"

#DEP: SECURITY_GROUP_ID=$(openstack port show ${PORT_ID} -c security_group_ids -f json | jq -r .security_group_ids |  tr ',' '\n' | head -1)
# Take the first security group bound to the port.
SECURITY_GROUP_ID=$(openstack port show "${PORT_ID}" -c security_group_ids -f json | jq -r 'first(.security_group_ids[])')
echo "${SECURITY_GROUP_ID}"
# openstack security group show ${SECURITY_GROUP_ID}

# create security group rule to allow incoming icmp traffic
SECURITY_GROUP_RULE_ID=$(openstack security group rule create --protocol icmp "${SECURITY_GROUP_ID}" -c id -f value)
echo "${SECURITY_GROUP_RULE_ID}"
openstack security group rule show "${SECURITY_GROUP_RULE_ID}"

# remove rule again once done debugging
openstack security group rule delete "${SECURITY_GROUP_RULE_ID}"

RabbitMQ

Connect to control node(s)

# Show the RabbitMQ container's state (running/exited)
docker ps -a | grep rabbitmq
 
# Open an interactive shell inside the RabbitMQ Docker container
docker exec -it rabbitmq bash

RabbitMQ state

# Show node status
# NOTE(review): the leading "rabbitmq" before rabbitmqctl looks like a
# site-specific wrapper or alias (the list_queues line below omits it) — confirm.
rabbitmq rabbitmqctl status
 
# Show cluster status
rabbitmq rabbitmqctl cluster_status
 
# Show queues (run inside the container where rabbitmqctl is on PATH)
rabbitmqctl list_queues

CLI

# Start the app (normal operation)
rabbitmq rabbitmqctl start_app 
 
# Stop the app (node keeps running, only the RabbitMQ application stops)
rabbitmq rabbitmqctl stop_app
 
# Reset the node to a blank state — NOTE(review): this wipes the node's
# local data; only intended as part of the rejoin sequence below.
rabbitmq rabbitmqctl reset
 
# Join the cluster (here: the node rabbit@ctl2-stage)
rabbitmq rabbitmqctl join_cluster rabbit@ctl2-stage
 
# Start the app again after joining
rabbitmq rabbitmqctl start_app

OpenStack: port

Identify port by MAC

# Find the Neutron port owning a given MAC address.
MAC=00:11:22:33:44:55
openstack port list --mac-address "${MAC}"
 
# Inspect the subnet the port belongs to.
SUBNET_ID=b07b6b7a-dfb2-4b58-82cb-1568da8990b3
openstack subnet show "${SUBNET_ID}"
 
# Inspect the owning project.
PROJECT_ID=701e329e-997d-4dfa-b0d0-27a51670ed2d
openstack project show "${PROJECT_ID}"

OpenStack: Create certbot VM

Create VM

# Boot a small Ubuntu VM named "foo-certbot" for issuing certificates.
openstack server create foo-certbot \
    --image "Ubuntu 18.04" \
    --flavor m1.small \
    --key-name foo-key \
    --network foo-net \
    --security-group foo-sec

Add floating IP

# Allocate a floating IP from the "public" pool and attach it to the VM.
FLOATING_IP=$(openstack floating ip create public -c floating_ip_address -f value)
echo "${FLOATING_IP}"
openstack server add floating ip foo-certbot "${FLOATING_IP}"

Install certbot

ssh ubuntu@${FLOATING_IP} sudo apt update && sudo apt install -y certbot

Create wildcard certificates

# One domain per line; word-splitting in the loop below is intentional.
export DOMAINS="
dev.example.com
www.example.com
"
 
for DOMAIN in ${DOMAINS}; do
    # Manual DNS-01 challenge against the Let's Encrypt v2 (ACME) endpoint;
    # quote the wildcard so the shell can never glob-expand it.
    sudo certbot certonly \
    --manual \
    --manual-public-ip-logging-ok \
    --register-unsafely-without-email \
    --agree-tos \
    --preferred-challenges dns-01 \
    --server https://acme-v02.api.letsencrypt.org/directory \
    -d "*.${DOMAIN}"
done

Debug

Elasticsearch: curator

Install

sudo apt install -y elasticsearch-curator
# pip install elasticsearch-curator

Show usage

http://elasticsearch.example.com:9200/_cat/indices

curator.yml

---
client:
  hosts:
    - elasticsearch.example.com
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False
 
logging:
  loglevel: INFO
  logfile:
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']

delete_indices.yml

---
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices older than 7 days
    options:
      ignore_empty_list: True
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
#    - filtertype: pattern
#      kind: prefix
#      value: logstash-
#      #value: flog-
#      exclude:
#    - filtertype: pattern
#      kind: prefix
#      value: flog-
#      exclude:
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: 7
      exclude:

Delete old data

curator --config ./curator.yml ./delete_indices.yml #--dry-run

Links
http://www.madhur.co.in/blog/2017/04/09/usingcuratordeleteelasticindex.html
https://cezarypiatek.github.io/post/scheduled-elasticsearch-cleanup/

OpenStack: Designate (DNSaaS)

Configure kolla-ansible

#echo 'designate_tag: "7.0.1.2"' >> /etc/kolla/globals.yml
# Uncomment dns_interface so Designate binds to the DNS network interface.
sed -i 's/[#]dns_interface:/dns_interface:/g' /etc/kolla/globals.yml
# Enable the Designate service and its Horizon dashboard panel.
sed -i 's/[#]enable_designate: .*/enable_designate: "yes"/g' /etc/kolla/globals.yml
sed -i 's/[#]enable_horizon_designate:/enable_horizon_designate:/g' /etc/kolla/globals.yml
# Set the pool NS record; "{{ os_environment }}" is rendered by Ansible later.
sed -i 's/[#]designate_ns_record: .*/designate_ns_record: "pool.{{ os_environment }}.example.com"/g' /etc/kolla/globals.yml

Configure pool

mkdir -p /etc/kolla/config/foo/designate
 
/etc/kolla/config/foo/designate/pools.yaml
- name: default-bind
  description: Default BIND9 Pool
  attributes: {}
  ns_records:
    - hostname: ns1.dev.i.example.com.
      priority: 1
  nameservers:
    - host: 10.0.4.45
      port: 53
  targets:
    - type: bind9
      description: BIND9 Server 1
      masters:
        - host: 10.0.4.135
          port: 5354
      options:
        host: 10.0.4.45
        port: 53
        rndc_host: 10.0.4.45
        rndc_port: 953
        rndc_key_file: /etc/designate/rndc.key

Deploy Designate container

Lego: Let’s Encrypt client and ACME library

Get latest version

# Download the lego release tarball and extract only the binary into /tmp
wget https://github.com/go-acme/lego/releases/download/v3.2.0/lego_v3.2.0_linux_amd64.tar.gz -qO- | tar -C /tmp -xz lego
# stop services on port 80 / 443 (lego's HTTP challenge needs the port free)
service nginx stop
service apache2 stop
 
# Request certificate via the HTTP-01 challenge
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --http run
 
# Request wildcard certificate via a manual DNS challenge
/tmp/lego --accept-tos --email="foo@bar.com" --domains="bar.com" --domains="*.bar.com" --dns manual run
 
# restart services on port 80 / 443
service nginx start
service apache2 start

Show certificates

ll ~/.lego/certificates/

OpenStack Designate

# test with the designate DNS provider; without OpenStack credentials in the
# environment it fails with the message shown below
/tmp/lego --accept-tos --email="foo@bar.com" --domains="*.bar.com" --dns designate run
... designate: some credentials information are missing: OS_AUTH_URL,OS_USERNAME,OS_PASSWORD,OS_TENANT_NAME,OS_REGION_NAME

Links
https://github.com/go-acme/lego
https://go-acme.github.io/lego/dns/designate/

LXD: storage

Links
https://github.com/lxc/lxd/blob/master/doc/storage.md

Configure default storage

# Create a ZFS-backed pool named "zfs" and make it the default root disk
# NOTE(review): a pool named "zfs" is created twice below with different
# sources (rpool/lxd vs tank/lxd) — these look like alternatives, confirm.
lxc storage create zfs zfs source=rpool/lxd
lxc profile device add default root disk path=/ pool=zfs
lxc storage list
lxc storage delete default
 
# zfs (alternative source dataset)
lxc storage create zfs zfs source=tank/lxd
lxc storage list
 
# delete default storage — cached volumes must be removed first
lxc storage volume list default
lxc storage volume delete default image/7d788819a5a97433db8470ee68370ec69e829b429800fa28b5524f0411490ce9
lxc storage delete default
 
# move container to another storage pool (via a temporary rename)
CONTAINER=www1
lxc move ${CONTAINER} ${CONTAINER}-tmp -s nvme
lxc move ${CONTAINER}-tmp ${CONTAINER}
lxc start ${CONTAINER}

Configure ZFS storage

# Re-point the dev-zfs profile's root disk at the "zfs" storage pool
lxc profile device del dev-zfs root
lxc profile device add dev-zfs root disk path=/ pool=zfs

Storage

# change a container's root-disk storage quota (replace <CONTAINER_NAME>)
lxc config device set <CONTAINER_NAME> root size 100GB
 
# create a 250G LVM thin pool "kvm" in VG "system" and register it as an LXD pool
lvcreate -L 250G --thinpool kvm system
lxc storage create kvm lvm source=system lvm.thinpool_name=kvm
 
# pass a raw block device into container "c1" / detach a device again
lxc config device add c1 xvdb1 unix-block source=/dev/xvdb1 required=false
lxc config device remove gitlab-runner3-dev xvdb2

Container root