Install s3cmd (s3 CLI client)

# install s3cmd from the distribution repositories
sudo apt install -y s3cmd

Create the s3cmd configuration file

# interactive setup wizard; writes the configuration file (~/.s3cfg)
s3cmd --configure


# create bucket "buk1"
s3cmd mb s3://buk1
# upload a file into the bucket
s3cmd put /tmp/1m.img s3://buk1
# list the bucket contents
s3cmd ls s3://buk1
# show disk usage of all buckets
s3cmd du
# delete a single object
s3cmd rm s3://buk1/1m.img
# remove the (empty) bucket
s3cmd rb s3://buk1
# remove the bucket including all objects it contains
s3cmd rb s3://buk1 --recursive
# delete everything under the prefix without asking for confirmation
s3cmd rm s3://lfs/ --recursive --force
# one-shot invocation with credentials and endpoint given on the command
# line (NOTE: secrets passed as arguments are visible in `ps` output and
# shell history — prefer the configuration file for real credentials)
s3cmd \
  --access_key xxxxxxxxxxx  \
  --secret_key xxxxxxxxxxxxxx \
  --host-bucket '%(bucket)s.s3.example.com' \
  --host s3.example.com \
  --signature-v2 \
  --no-preserve \
  ls s3://lpz-duply


# make a single object publicly readable
s3cmd setacl s3://bucket/path/to/file --acl-public
# show object metadata including the current ACL
s3cmd info s3://bucket/path/to/file
# revert the object to private access
s3cmd setacl s3://bucket/path/to/file --acl-private




# install s3fs (FUSE filesystem for mounting S3 buckets)
sudo apt install -y s3fs

Create credentials

# replace ACCESS_KEY_ID / SECRET_ACCESS_KEY with real values
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ~/.passwd-s3fs
# s3fs refuses credential files readable by group/others
chmod 600 ~/.passwd-s3fs

Mount s3 bucket

# mount bucket "backups" at /media/backup from a custom S3 endpoint
s3fs backups /media/backup \
  -o url=https://s3.example.com \
  -o passwd_file=~/.passwd-s3fs


# additional mount options: allow access by all users, fully permissive umask
-o url=https://s3.example.com,allow_other,umask=0000
# enable a local file cache under /tmp/cache
-o use_cache=/tmp/cache


# /etc/fstab entry to mount the bucket at boot (_netdev = wait for network)
mybucket1.mydomain.org /mnt/mybucket1 fuse.s3fs _netdev,allow_other,passwd_file=/home/ftpuser/.passwd-aws-s3fs,default_acl=public-read,uid=1001,gid=65534   0 0


NVMe firmware update with Intel SSD Data Center Tool

Intel NVMe firmware update with Intel SSD Data Center Tool
Check for latest version: https://downloadcenter.intel.com/search?keyword=SSD+Firmware+Update+Tool

# download the tool archive (set URL to the current release first)
wget ${URL} -O /tmp/Intel_SSD_Data_Center_Tool_Linux.zip
unzip -d /tmp /tmp/Intel_SSD_Data_Center_Tool_Linux.zip
sudo dpkg -i /tmp/isdct_*_amd64.deb
# Enable support for devices behind LSI RAID controller
isdct set -system EnableLSIAdapter=true
# show all available Intel NVMe devices
isdct show -intelssd
# show per-device index whether a firmware update is available
isdct show -d Index,FirmwareUpdateAvailable -intelssd
# list device paths as plain text (JSON output parsed with jq)
isdct show -o json -d DevicePath -intelssd | jq -r .[].DevicePath
# update firmware on the device with index 0
isdct load -intelssd 0

Flash all Intel NVMe drives

GRML iPXE netboot from HTTP (without NFS)

# downlaod Grml image
# download Grml image
wget http://download.grml.org/grml32-small_2013.02.iso -O /tmp/grml32-small_2013.02.iso

# loop-mount the ISO image
mount /tmp/grml32-small_2013.02.iso /mnt/ -o loop

# copy Grml kernel, initrd and squashfs to the tftpboot directory
# (-p: create parent directories, do not fail if they already exist)
mkdir -p /var/lib/tftpboot/live/grml/32-small/
cp /mnt/boot/grml32small/initrd.img /var/lib/tftpboot/live/grml/32-small/
cp /mnt/boot/grml32small/vmlinuz /var/lib/tftpboot/live/grml/32-small/
cp /mnt/live/grml32-small/grml32-small.squashfs /var/lib/tftpboot/live/grml/32-small/

# configure the NFS export for the live filesystem
# (original line was missing the closing quote and the redirect target)
echo "/var/lib/tftpboot/live/grml/32-small *(ro,no_root_squash,async,no_subtree_check)" >> /etc/exports
# re-export all entries from /etc/exports
exportfs -ra


Configure kernel shared memory parameters

# determine the maximum size of a shared memory segment
# show the current maximum size of a shared memory segment (bytes)
cat /proc/sys/kernel/shmmax
# set shared memory limit for shmmax to 16 GiB
# (takes effect immediately, lost on reboot)
echo 17179869184 > /proc/sys/kernel/shmmax
# persist the same value across reboots via a sysctl.d drop-in
# (was 4294967296 = 4 GiB, inconsistent with the 16 GiB runtime value above)
echo "kernel.shmmax=17179869184" >> /etc/sysctl.d/90-shmmax.conf
# load parameter
/sbin/sysctl -p /etc/sysctl.d/90-shmmax.conf

reduce swap usage

OpenStack: Floating IP port forward (in development / experimental)

List floating IPs

# list all floating IPs in the current project
openstack floating ip list
# example output (truncated; this floating IP is not attached to any port):
| ID                                   | Floating IP Address | Fixed IP Address | Port                                 | Floating Network                     | Project                          |
| dc049c28-6562-4c37-834b-d3a612d4b580 |        | None             | None                                 | 39583230-154f-4b56-a56e-2fd83c9986ce | 1eede1bdc28344f3acf6b48b232e406f |

List VMs

OpenStack: Debug DHCP

Find nodes with a high rate of "lease not found" messages

# On every nova-scheduler host, scan the dnsmasq log for minutes in
# September with more than 100 "lease not found" messages.
# (original loop was missing the closing "done")
for NODE in $(openstack compute service list --service nova-scheduler -c Host -f value); do
    echo "${NODE}"
    # count occurrences per timestamp prefix (date + time field),
    # then print only timestamps seen more than 100 times
    ssh "${NODE}" cat /var/lib/docker/volumes/kolla_logs/_data/neutron/dnsmasq.log \
      | grep "lease not found" | grep Sep | awk '{print $1, $2}' | uniq -c \
      | awk '$1 > 100 {print}'
done
Many DHCP requests

Octavia: Allow SSH login to Amphora VM

Allow SSH access

# find the MASTER amphora of the load balancer and the security group
# attached to its management-network port
AMPHORA_ID=$(openstack loadbalancer amphora list --loadbalancer "${LB_ID}" --role MASTER -c id -f value)
AMPHORA_COMPUTE_ID=$(openstack loadbalancer amphora show "${AMPHORA_ID}" -c compute_id -f value)
LB_NETWORK_IP=$(openstack loadbalancer amphora show "${AMPHORA_ID}" -c lb_network_ip -f value)
SECURITY_GROUP_ID=$(openstack port list --server "${AMPHORA_COMPUTE_ID}" --fixed-ip "ip-address=${LB_NETWORK_IP}" -c security_group_ids -f value)
# DEBUG: show ingress tcp rules
openstack security group rule list --ingress --protocol tcp "${SECURITY_GROUP_ID}"
# allow SSH (tcp/22) from anywhere; restrict the CIDR in production
# (original command passed the group ID as the --remote-ip value and
# omitted the positional security-group argument)
openstack security group rule create --protocol tcp --dst-port 22:22 --remote-ip 0.0.0.0/0 "${SECURITY_GROUP_ID}"
# show management IP and role of all amphorae of this load balancer
openstack loadbalancer amphora list --loadbalancer "${LB_ID}" -c  lb_network_ip -c role -f value
openstack loadbalancer amphora list --loadbalancer "${LB_ID}" -c  lb_network_ip --role MASTER -f value
# login to amphora VM from OpenStack control node
ssh local@ctl1-dev.dev.i.example.com
ssh -i ~/.ssh/id_rsa_octavia ubuntu@${AMPHORA_VM_IP}

Manual SSH access