Kubernetes the hard way

Links
https://github.com/kelseyhightower/kubernetes-the-hard-way

Configure OpenStack application credentials

# Store OpenStack application credentials where the openstack CLI / SDK expects them.
mkdir -p ~/.config/openstack

# quoted delimiter: content is written literally, so credentials that
# contain $ or ` are not mangled by shell expansion
cat <<'EOF' > ~/.config/openstack/clouds.yaml
clouds:
  dev-foo:
    auth_type: "v3applicationcredential"
    auth:
      auth_url: https://keystone.service.dev.example.com/v3
      application_credential_id: "YOUR_CREDENTIAL_ID"
      application_credential_secret: "YOUR_CREDENTIAL_PASS"
EOF

Install Terraform

# Install the latest Terraform release into ~/bin via a throwaway Ansible playbook.
# quoted delimiter: the {{ ... }} Jinja expressions are written literally
cat <<'EOF' > /tmp/install-terraform.yml
---
- hosts: localhost
  tasks:
    - name: Get latest Terraform version
      uri:
        url: https://checkpoint-api.hashicorp.com/v1/check/terraform
      register: response

    - set_fact:
        terraform_download_url: "{{ response.json.current_download_url }}"
        terraform_version: "{{ response.json.current_version }}"

    - name: Download Terraform {{ terraform_version }}
      unarchive:
        src: "{{ terraform_download_url }}terraform_{{ terraform_version }}_{{ ansible_system | lower }}_amd64.zip"
        remote_src: yes
        dest: ~/bin
        creates: ~/bin/terraform
        # mode must be quoted: unquoted 0550 is read by YAML as octal
        # integer 360 and applied as the wrong permission bits
        mode: "0550"
EOF

ansible-playbook /tmp/install-terraform.yml

Create test env on OpenStack

OpenStack: Octavia / Amphora LB check

#!/bin/bash
# Octavia / Amphora load-balancer health check.

# load admin credentials from the Kolla deployment
source /etc/kolla/admin-openrc.sh

 
# Print the domain and project that own the given load balancer.
# Arguments: $1 - load balancer ID or name
# Outputs:   "Domain: <name>" and "Project: <name>" on stdout
show_lb_owner() {
    local lb_id=$1
    local project_id project_name domain_id domain_name

    # resolve owning project
    project_id=$(openstack loadbalancer show -c project_id -f value "${lb_id}")
    project_name=$(openstack project show -c name -f value "${project_id}")

    # resolve owning domain
    domain_id=$(openstack project show -c domain_id -f value "${project_id}")
    domain_name=$(openstack domain show -c name -f value "${domain_id}")

    echo "Domain: ${domain_name}"
    echo "Project: ${project_name}"
}
 
 
# overall check status: 0 = healthy, 1 = at least one broken amphora found
EXIT_CODE=0


# list broken LB
# amphorae stuck in provisioning-status ERROR need a failover or cleanup
OUTPUT="$(openstack loadbalancer amphora list --provisioning-status ERROR)"
if [ -n "${OUTPUT}" ]; then
    echo "${OUTPUT}"

    EXIT_CODE=1
fi

# search for broken LB

NVMe

Install

sudo apt install -y nvme-cli

CLI

# list devices
nvme list

# SMART health / wear data for a single namespace
nvme smart-log /dev/nvme0n1

# Intel SSD tool: firmware status and available firmware updates
isdct show -d DeviceStatus,Index,Firmware,FirmwareUpdateAvailable -intelssd

# format
# WARNING: both commands below irreversibly destroy all data on the device
https://manpages.ubuntu.com/manpages/jammy/en/man1/nvme-format.1.html
nvme format --force /dev/nvmeXn1
blkdiscard --force /dev/nvmeXn1

Fix nvme1: ignoring ctrl due to duplicate subnqn
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1803692

# dmesg | grep nvme
[    2.546620] nvme nvme0: pci function 0000:5e:00.0
[    2.552447] nvme nvme1: pci function 0000:5f:00.0
[    2.768347] nvme nvme1: ignoring ctrl due to duplicate subnqn (nqn.2017-12.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555).
[    2.775422] nvme nvme1: Removing after probe failure status: -22
[    2.779813]  nvme0n1: p1 p2

Fix by upgrading the NVMe firmware
http://www.panticz.de/intel/nvme

Delete

Octavia: proxy protocol

# TCP listener on port 80
openstack loadbalancer listener create --name foo-lb1-tcp-80 --protocol TCP --protocol-port 80 foo-lb1
# PROXY-protocol pool: the LB prepends the original client address for the backends
openstack loadbalancer pool create --name foo-lb1-proxy-pool --lb-algorithm ROUND_ROBIN --listener foo-lb1-tcp-80 --protocol PROXY
# register the backend member
openstack loadbalancer member create --subnet-id foo-subnet --address 10.0.1.13 --protocol-port 80 foo-lb1-proxy-pool
 
# check whether http_realip_module is available
nginx -V 2>&1 | grep -- 'http_realip_module'

# configure nginx
# accept the PROXY protocol on the listener and trust the LB as realip source
cat /etc/nginx/sites-enabled/default 
...
server {
    listen 80 default_server proxy_protocol;
    set_real_ip_from 10.0.1.17; # incoming proxy IP
    #set_real_ip_from 192.168.1.0/24;
    real_ip_header proxy_protocol;
...

# pass the real client IP on to upstream applications
cat /etc/nginx/nginx.conf
...
http {
    proxy_set_header X-Real-IP       $proxy_protocol_addr;
    proxy_set_header X-Forwarded-For $proxy_protocol_addr;
...

Links
https://docs.nginx.com/nginx/admin-guide/load-balancer/using-proxy-protocol/
https://www.scaleway.com/en/docs/configure-proxy-protocol-with-a-load-balancer/

Mellanox: SR-IOV (Single Root IO Virtualization)

Install Mellanox Driver
http://www.panticz.de/mellanox/install-dirver

# locate the Mellanox NICs on the PCI bus
lspci | grep Mellanox
# enable SR-IOV with 16 virtual functions in NIC firmware (takes effect after reboot)
mstconfig -y -d  18:00.1 set SRIOV_EN=1 NUM_OF_VFS=16

# legacy mlx4 alternative via module options:
#cat /etc/modprobe.d/mlnx.conf 
#options mlx4_core num_vfs=5 probe_vf=5

apt install -y sysfsutils

# persist the VF count across reboots via sysfsutils
cat <<EOF> /etc/sysfs.d/mlnx-sriov_numvfs.conf
class/net/ens6f0/device/sriov_numvfs = 8
class/net/ens6f1/device/sriov_numvfs = 8
class/net/ens7f0/device/sriov_numvfs = 8
class/net/ens7f1/device/sriov_numvfs = 8
EOF

# /boot/grub/grub.cfg
# add the kernel parameter below; the IOMMU is required for SR-IOV passthrough
intel_iommu=on

# verify interface names, then activate the VFs immediately
ll /sys/class/net/en{p,s}*
echo 8 > /sys/class/net/ens6f0/device/sriov_numvfs

Configure VLAN

LXD: OpenStack CLI (OSC) container

# create container
# create container
lxc launch ubuntu:20.04 osc
lxc shell osc

# install OpenStack CLI (plus VPNaaS, Octavia and Barbican plugins)
apt install -y python3-openstackclient python3-neutron-vpnaas python3-octaviaclient python3-barbicanclient
openstack complete | sudo tee /etc/bash_completion.d/openstack
source /etc/bash_completion

# configure connection
mkdir -p ~/.config/openstack
# quoted delimiter: credentials are written literally, no shell expansion
cat <<'EOF' > ~/.config/openstack/clouds.yaml
clouds:
  dev-foo-app:
    auth:
      auth_url: https://keystone.service.example.com/v3
      application_credential_id: "xxxxxxxx"
      application_credential_secret: "xxxxxxxx"
    region_name: "eu-fra1"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
EOF

# absolute path: appending to ".bashrc" would depend on the current directory
echo 'export OS_CLOUD=dev-foo-app' >> ~/.bashrc

# test
export OS_CLOUD=dev-foo-app
openstack image list

OpenStack: Output VM list with project and domain as JSON file

# split only on newline/backspace so JSON rows containing spaces
# survive the word-splitting in the for-loop below
IFS=$(echo -en "\n\b")

# all projects (long format includes the Domain ID column) as JSON
PROJECTS_JSON=$(openstack project list --long -f json)
for PROJECT_JSON in $(echo "${PROJECTS_JSON}" | jq -c '.[]'); do
    PROJECT_ID=$(echo ${PROJECT_JSON} | jq -r .ID)
    PROJECT_NAME=$(echo ${PROJECT_JSON} | jq -r .Name)
    DOMAIN_ID=$(echo ${PROJECT_JSON} | jq -r '."Domain ID"')
    DOMAIN_JSON=$(openstack domain show  ${DOMAIN_ID} -f json)
    DOMAIN_NAME=$(echo ${DOMAIN_JSON} | jq -r .name)
 
    openstack server list --all-projects --long --project ${PROJECT_ID} --sort-column Name -f json | jq .[] | \