OpenStack: Extend public IP range

Show subnet details
openstack subnet list --network public
openstack subnet show public-

Modify database
DB_PASS=$(grep neutron_database_password /etc/kolla/passwords.yml | cut -d " " -f2)
mysql -h <db-host> --password=${DB_PASS} -P 6033 -u neutron -D neutron
select * from neutron.subnets where cidr like '10.0.0%';
update subnets set cidr='<new-cidr>' WHERE cidr='<old-cidr>';

# update subnet name
openstack subnet set --name public- public-

# extend allocation-pool
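For example, append a new pool (subnet name and range are placeholders, adjust to your environment):
openstack subnet set --allocation-pool start=10.0.0.100,end=10.0.0.199 <public-subnet>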

OpenStack: server (VM)

# List instances / VMs
openstack server list
openstack server list -c ID -c Name -c Status -c Networks -c Host --long

openstack server create foo-vm1 \
--image "Ubuntu 18.04" \
--flavor m1.small \
--key-name foo-key \
--network foo-net

# delete instance
openstack server delete "vm-u1804"
# list all servers from all projects
openstack server list --all-projects --os-cloud=stage-admin
# get IDs only from server
openstack server list --os-cloud=dev-foo -c ID -f value
# get all servers using windows images
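A possible approach, assuming the image names contain "windows":
for IMG in $(openstack image list -f value -c ID -c Name | grep -i windows | cut -d " " -f1); do
openstack server list --all-projects --image ${IMG}
done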

Nginx: Log client IP behind NAT with http_x_forwarded_for (X-Forwarded-For header)

Option 1: use the nginx real_ip module
nginx -V 2>&1 | grep with-http_realip_module
# /etc/nginx/nginx.conf
http {
    set_real_ip_from x.x.x.x/x; # LB / NAT subnet
    real_ip_header X-Forwarded-For;
    ...
}

Option 2: customize log_format
cat /etc/nginx/nginx.conf
log_format main '$http_x_forwarded_for - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"';
access_log /var/log/nginx/access.log main;

Reload Nginx configuration
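For example:
nginx -t && nginx -s reload
# or via systemd: systemctl reload nginx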

Fix Octavia LB provisioning_status from PENDING_UPDATE or ERROR to ACTIVE

Show LoadBalancer state
# List all LoadBalancers
openstack loadbalancer list

# List LoadBalancer details
openstack loadbalancer show 0ce30f0e-1d75-486c-a09f-79125abf44b8

# List LoadBalancer VM (amphora) details
openstack loadbalancer amphora list --loadbalancer 0ce30f0e-1d75-486c-a09f-79125abf44b8

# List all Octavia LB / VMs
openstack server list --all --long --name amphora --os-cloud=dev-admin

Manually update provisioning_status from PENDING_UPDATE / ERROR to ACTIVE in the Octavia database
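A minimal sketch for a kolla-ansible deployment (DB host and LB ID are placeholders):
DB_PASS=$(grep octavia_database_password /etc/kolla/passwords.yml | cut -d " " -f2)
mysql -h <db-host> --password=${DB_PASS} -P 6033 -u octavia -D octavia
update load_balancer set provisioning_status = 'ACTIVE' where id = '<lb-id>';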

OpenStack: flavor

# list all flavors
openstack flavor list --sort-column Name --all

# create a server using a flavor
openstack server create --flavor m1.small --image "Ubuntu 18.04" --nic net-id=foo-network --security-group default --key-name foo-key foo-vm1

Formatted output
for FLAVOR in $(openstack flavor list --sort-column Name -c Name -f value); do
echo ${FLAVOR}
openstack flavor show ${FLAVOR}
done

Delete all flavors
openstack flavor list --all -c ID -f value | xargs openstack flavor delete

OpenStack: Allow user access to tenant projects

Get mgmt user data
# get user ID and domain ID
MGMT_USER_ID=$(openstack user list --long -c ID -c Name -f value | grep <mgmt-user-name> | cut -d" " -f1)
echo ${MGMT_USER_ID}

# get projects
openstack project list --long | grep safyievOokEgavUtdytPeurmebKowEff

# get assignments
openstack role assignment list --user JekUvyeijHaDrithWianvestUtevLiUk --project e72c94c20b4d40e3b971bc510d536e87 --names

# get Domain name
openstack domain list | grep safyievOokEgavUtdytPeurmebKowEff

Search tenant data
# get user domain ID
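For example (user ID is a placeholder):
openstack user show <user-id> -c domain_id -f value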

kubectl - Kubernetes CLI client

sudo apt-get update
sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl kubeadm

Bash completion
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl

Manual installation
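For example, fetch the latest stable kubectl binary (commands from the upstream install docs):
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl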

OpenStack: Magnum

openstack coe service list
openstack stack list
openstack stack show d8a1c3af-7993-4493-91be-19cfce38a870
openstack coe cluster update k8s-cluster replace node_count=2

Configure kolla-ansible deployment
# cat /etc/kolla/config/magnum.conf
default_docker_volume_type = VT1

cluster_user_trust = True

#region_name = ch-zh1
region_name_for_services = RegionOne

# /etc/kolla/globals.yml
# magnum_tag: ""
enable_magnum: "yes"

Redeploy / reconfigure containers
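For example (inventory path is a placeholder):
kolla-ansible -i <inventory> reconfigure --tags magnum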

Kubernetes dashboard UI

Add user and configure permissions
cat <<EOF > /tmp/dashboard-adminuser.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
EOF
kubectl apply -f /tmp/dashboard-adminuser.yaml

cat <<EOF > /tmp/kubernetes-dashboard-admin.rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
kubectl apply -f /tmp/kubernetes-dashboard-admin.rbac.yaml