NVMe

Install

# install the NVMe management CLI (provides the `nvme` command)
sudo apt install -y nvme-cli

CLI

# list NVMe devices attached to the system
nvme list
 
# show the SMART / health log for the first namespace
nvme smart-log /dev/nvme0n1
 
# Intel SSD Data Center Tool: show device status and firmware update availability
isdct show -d DeviceStatus,Index,Firmware,FirmwareUpdateAvailable -intelssd
 
# format (WARNING: destroys all data on the device; replace X with the device number)
https://manpages.ubuntu.com/manpages/jammy/en/man1/nvme-format.1.html
nvme format --force /dev/nvmeXn1
blkdiscard --force /dev/nvmeXn1

Fix nvme1: ignoring ctrl due to duplicate subnqn
https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1803692

# dmesg | grep nvme
[    2.546620] nvme nvme0: pci function 0000:5e:00.0
[    2.552447] nvme nvme1: pci function 0000:5f:00.0
[    2.768347] nvme nvme1: ignoring ctrl due to duplicate subnqn (nqn.2017-12.org.nvmexpress:uuid:11111111-2222-3333-4444-555555555555).
[    2.775422] nvme nvme1: Removing after probe failure status: -22
[    2.779813]  nvme0n1: p1 p2

Fix by upgrading the NVMe firmware
http://www.panticz.de/intel/nvme

Delete

Octavia: proxy protocol

# create a TCP listener on port 80, a PROXY-protocol pool behind it, and one backend member
openstack loadbalancer listener create --name foo-lb1-tcp-80 --protocol TCP --protocol-port 80 foo-lb1
openstack loadbalancer pool create --name foo-lb1-proxy-pool --lb-algorithm ROUND_ROBIN --listener foo-lb1-tcp-80 --protocol PROXY
openstack loadbalancer member create --subnet-id foo-subnet --address 10.0.1.13 --protocol-port 80 foo-lb1-proxy-pool
 
# check whether http_realip_module is compiled into nginx
nginx -V 2>&1 | grep -- 'http_realip_module'
 
# configure nginx to accept PROXY protocol and restore the real client IP
cat /etc/nginx/sites-enabled/default 
...
server {
    listen 80 default_server proxy_protocol;
    set_real_ip_from 10.0.1.17; # incoming proxy (load balancer) IP
    #set_real_ip_from 192.168.1.0/24;
    real_ip_header proxy_protocol;
...
 
cat /etc/nginx/nginx.conf
...
http {
    proxy_set_header X-Real-IP       $proxy_protocol_addr;
    proxy_set_header X-Forwarded-For $proxy_protocol_addr;
...

Links
https://docs.nginx.com/nginx/admin-guide/load-balancer/using-proxy-protocol/
https://www.scaleway.com/en/docs/configure-proxy-protocol-with-a-load-balancer/

Mellanox: SR-IOV (Single Root IO Virtualization)

Install Mellanox Driver
http://www.panticz.de/mellanox/install-dirver

# find the Mellanox NIC PCI addresses
lspci | grep Mellanox
# enable SR-IOV with up to 16 virtual functions in the adapter firmware
mstconfig -y -d  18:00.1 set SRIOV_EN=1 NUM_OF_VFS=16
 
#cat /etc/modprobe.d/mlnx.conf 
#options mlx4_core num_vfs=5 probe_vf=5
 
# sysfsutils applies the sysfs settings below at every boot
apt install -y sysfsutils
 
# create 8 virtual functions per port at boot
cat <<EOF> /etc/sysfs.d/mlnx-sriov_numvfs.conf
class/net/ens6f0/device/sriov_numvfs = 8
class/net/ens6f1/device/sriov_numvfs = 8
class/net/ens7f0/device/sriov_numvfs = 8
class/net/ens7f1/device/sriov_numvfs = 8
EOF
 
# add to the kernel command line (GRUB_CMDLINE_LINUX in /etc/default/grub, then run update-grub)
intel_iommu=on
 
# verify interfaces and create the VFs immediately (without reboot)
ll /sys/class/net/en{p,s}*
echo 8 > /sys/class/net/ens6f0/device/sriov_numvfs

Configure VLAN

LXD: OpenStack CLI (OSC) container

# create container
lxc launch ubuntu:20.04 osc
lxc shell osc
 
# install OpenStack CLI (plus VPNaaS, Octavia and Barbican plugins)
apt install -y python3-openstackclient python3-neutron-vpnaas python3-octaviaclient python3-barbicanclient
# enable bash completion for the openstack command
openstack complete | sudo tee /etc/bash_completion.d/openstack
source /etc/bash_completion
 
# configure connection (application credentials for the target cloud)
mkdir -p ~/.config/openstack
cat <<EOF> ~/.config/openstack/clouds.yaml
clouds:
  dev-foo-app:
    auth:
      auth_url: https://keystone.service.example.com/v3
      application_credential_id: "xxxxxxxx"
      application_credential_secret: "xxxxxxxx"
    region_name: "eu-fra1"
    interface: "public"
    identity_api_version: 3
    auth_type: "v3applicationcredential"
EOF
 
# persist the default cloud selection for future shells
# (use ~/.bashrc explicitly — a bare `.bashrc` only works when cwd happens to be $HOME)
echo 'export OS_CLOUD=dev-foo-app' >> ~/.bashrc
 
# test
export OS_CLOUD=dev-foo-app
openstack image list

OpenStack: Output VM list with project and domain as JSON file

# limit word-splitting to newline/backspace so each compact JSON object stays one loop item
IFS=$(echo -en "\n\b")
 
PROJECTS_JSON=$(openstack project list --long -f json)
# iterate over all projects as one-line JSON objects
for PROJECT_JSON in $(echo "${PROJECTS_JSON}" | jq -c '.[]'); do
    PROJECT_ID=$(echo ${PROJECT_JSON} | jq -r .ID)
    PROJECT_NAME=$(echo ${PROJECT_JSON} | jq -r .Name)
    DOMAIN_ID=$(echo ${PROJECT_JSON} | jq -r '."Domain ID"')
    # resolve the human-readable domain name for the project's domain ID
    DOMAIN_JSON=$(openstack domain show  ${DOMAIN_ID} -f json)
    DOMAIN_NAME=$(echo ${DOMAIN_JSON} | jq -r .name)
 
    # NOTE(review): the snippet is truncated here — the jq filter after the
    # trailing pipe and the closing `done` are missing from the original source.
    openstack server list --all-projects --long --project ${PROJECT_ID} --sort-column Name -f json | jq .[] | \

script-server (Web UI for scripts)

Install

# install required packages
apt install -y unzip python3-tornado
 
# download and install script-server
mkdir script-server
cd script-server
wget https://github.com/bugy/script-server/releases/download/1.15.2/script-server.zip
unzip script-server.zip
rm script-server.zip
 
# start script-server (listens on port 5000 by default)
./launcher.py

Add job

# cat ./conf/runners/certgen.json 
{
  "name": "certgen",
  "description": "Request Lets Encrypt certificate",
  "script_path": "/usr/local/bin/certgen",
  "parameters": [
    {
      "name": "Domain",
      "default": "example.com"
    }
  ],
  "output_files": [
      "/home/local/certificates/*${Domain}*"
  ]
}

WebUI
http://SERVER_IP:5000/

Redirect port 80 to 5000

iptables -t nat -I PREROUTING -p tcp --dport 80 -j REDIRECT --to-ports 5000

Links
https://github.com/bugy/script-server

Vagrant

Install
https://www.vagrantup.com/downloads.html

# jq is needed to parse the HashiCorp checkpoint API response
# sudo apt install -y jq
 
# build the download URL for the latest released version
URL=$(wget https://checkpoint-api.hashicorp.com/v1/check/vagrant -qO- | jq  -r '"https://releases.hashicorp.com/vagrant/" + .current_version + "/vagrant_" + .current_version + "_x86_64.deb"')
# quote expansions so the script survives unexpected characters in the URL (SC2086)
wget "${URL}" -qP /tmp
sudo dpkg -i "/tmp/${URL##*/}"
 
vagrant --version

Install with Ansible

- name: Get latest vagrant version
  uri:
    url: https://checkpoint-api.hashicorp.com/v1/check/vagrant
  register: release
 
# direct attribute access avoids the jmespath dependency that json_query requires
- set_fact:
    version: "{{ release.json.current_version }}"
 
- name: Install Vagrant
  apt:
    deb: https://releases.hashicorp.com/vagrant/{{ version }}/vagrant_{{ version }}_x86_64.deb
  become: yes
 
- name: Adding user ubuntu to group libvirt
  user:
    name: ubuntu
    groups: libvirt
    append: yes
  become: yes
 
# use Ansible's privilege escalation instead of shelling out to sudo
- name: Install vagrant-libvirt plugin
  command: vagrant plugin install vagrant-libvirt
  become: yes
  become_user: ubuntu

CLI

topgrade

Install
https://github.com/r-darwish/topgrade/releases/

# download the pinned release tarball, unpack it and install the binary
URL=https://github.com/r-darwish/topgrade/releases/download/v5.5.0/topgrade-v5.5.0-x86_64-unknown-linux-gnu.tar.gz
 
# quote the expansion so the download does not break on special characters (SC2086)
wget "${URL}" -qP /tmp
tar -C /tmp -xzf /tmp/topgrade-v*-x86_64-unknown-linux-gnu.tar.gz
sudo mv /tmp/topgrade /usr/local/sbin

Custom configuration

# ~/.config/topgrade.toml
...
remote_topgrades = ["www.example.com", "db.example.com"]
 
[git]
repos = [
    "~/git/repository_1",
    "~/git/repository_2"
]
...

Run update

topgrade -cy

Run on specific remote host

topgrade -cy --only remotes --remote-host-limit ".*.example.com"

Links
https://github.com/r-darwish/topgrade
https://github.com/r-darwish/topgrade/releases