Kubernetes the hard way

Configure OpenStack application credentials

mkdir -p ~/.config/openstack
 
cat << EOF > ~/.config/openstack/clouds.yaml
clouds:
  dev-foo:
    auth_type: "v3applicationcredential"
    auth:
      auth_url: https://keystone.service.dev.example.com/v3
      application_credential_id: "YOUR_CREDENTIAL_ID"
      application_credential_secret: "YOUR_CREDENTIAL_PASS"
EOF
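 
# optional sanity check (assumes the openstack CLI, python-openstackclient, is installed)
openstack --os-cloud dev-foo token issue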

Install Terraform

cat << EOF > /tmp/install-terraform.yml
---
- hosts: localhost
  tasks:
    - name: Get latest Terraform version
      uri:
        url: https://checkpoint-api.hashicorp.com/v1/check/terraform
      register: response
 
    - set_fact:
        terraform_download_url: "{{ response.json.current_download_url }}"
        terraform_version: "{{ response.json.current_version }}"
 
    - name: Ensure ~/bin exists
      file:
        path: ~/bin
        state: directory
        mode: "0750"
 
    - name: Download Terraform {{ terraform_version }}
      unarchive:
        src: "{{ terraform_download_url }}terraform_{{ terraform_version }}_{{ ansible_system | lower }}_amd64.zip"
        remote_src: yes
        dest: ~/bin
        creates: ~/bin/terraform
        mode: "0550"
EOF
 
ansible-playbook /tmp/install-terraform.yml
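 
# quick check that the download and unpack worked
~/bin/terraform version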

Create test env on OpenStack

# terraform.tf
provider "openstack" {
  cloud      = "dev-foo"
}
 
resource "openstack_networking_network_v2" "network_1" {
  name       = "foo-net"
}
 
resource "openstack_networking_subnet_v2" "subnet_1" {
  name       = "foo-subnet"
  network_id =  openstack_networking_network_v2.network_1.id
  cidr       = "10.0.1.0/24"
}
 
resource "openstack_networking_router_v2" "router_1" {
  name       = "foo-router"
  external_network_id = "88934cac-8d55-40d5-8ff9-bde65011741d"
}
 
resource "openstack_networking_router_interface_v2" "terraform" {
  router_id = openstack_networking_router_v2.router_1.id
  subnet_id = openstack_networking_subnet_v2.subnet_1.id
}
 
resource "openstack_compute_keypair_v2" "keypair_1" {
  name = "foo-key"
  public_key = file("~/.ssh/id_rsa.pub")
}
 
variable "rule_cidr_list" {
  default = [
    "212.10.11.12/32",
    "10.0.1.0/24"
  ]
}
 
# variable "rule_cidr_port" {
#   default = [
#     22,
#     80,
#     443
#   ]
# }
 
resource "openstack_compute_secgroup_v2" "secgroup_1" {
  name        = "foo-sec"
  description = "foo-sec"
 
  rule {
    from_port   = -1
    to_port     = -1
    ip_protocol = "icmp"
    cidr        = "0.0.0.0/0"
  }
 
  dynamic "rule" {
    for_each = var.rule_cidr_list
 
    content {
      from_port   = 22
      to_port     = 22
      ip_protocol = "tcp"
      cidr        = rule.value
    }
  }
 
  dynamic "rule" {
    for_each = var.rule_cidr_list
 
    content {
      from_port   = 80
      to_port     = 80
      ip_protocol = "tcp"
      cidr        = rule.value
    }
  }
 
  dynamic "rule" {
    for_each = var.rule_cidr_list
 
    content {
      from_port   = 443
      to_port     = 443
      ip_protocol = "tcp"
      cidr        = rule.value
    }
  }
}
 
resource "openstack_compute_instance_v2" "bastion" {
  name = "foo-bastion"
  image_name = "CirrOS 0.5.1"
  flavor_name = "m1.tiny"
  key_pair = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]
 
  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}
 
resource "openstack_networking_floatingip_v2" "fip_1" {
  pool = "public"
}
 
resource "openstack_compute_floatingip_associate_v2" "fip_1" {
  floating_ip = openstack_networking_floatingip_v2.fip_1.address
  instance_id = openstack_compute_instance_v2.bastion.id
}
 
# k8s
resource "openstack_compute_instance_v2" "master" {
  name = "master${count.index + 1}"
  count = 2
  image_name = "Ubuntu 20.04"
  flavor_name = "m1.small"
  key_pair = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]
 
  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}
 
resource "openstack_compute_instance_v2" "worker" {
  name = "worker${count.index + 1}"
  count = 2
  image_name = "Ubuntu 20.04"
  flavor_name = "m1.medium"
  key_pair = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]
 
  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}
 
resource "openstack_compute_instance_v2" "lb" {
  name = "lb"
  image_name = "Ubuntu 20.04"
  flavor_name = "m1.micro"
  key_pair = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]
 
  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}

Deploy environment with Terraform

terraform 0.13upgrade -yes . 
terraform init
#terraform plan
terraform apply -auto-approve
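 
# optional: review what was created, from Terraform's state and from the OpenStack side
terraform state list
openstack --os-cloud dev-foo server list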

Create client LXD container

lxc launch ubuntu:20.04 k8s-client
lxc file push ~/.ssh/id_rsa* k8s-client/root/.ssh/
 
lxc shell k8s-client
sudo apt update && sudo apt dist-upgrade
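 
# lxc file push does not guarantee restrictive modes on the private key;
# tighten them if ssh later refuses to use it
chmod 600 /root/.ssh/id_rsa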

Install tools on Kubernetes VMs

sudo apt update
sudo apt install -y curl net-tools

Install cfssl

sudo apt install golang-cfssl
 
cfssl version

Install kubectl
https://kubernetes.io/docs/tasks/tools/install-kubectl/

curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
 
kubectl version --client

Configure SSH access through the bastion: /etc/ssh/ssh_config.d/k8s-hard.conf

Host bastion
Hostname 217.11.12.13
User cirros
 
Host master1
Hostname 10.0.1.10
 
Host master2
Hostname 10.0.1.8
 
Host worker1
Hostname 10.0.1.13
 
Host worker2
Hostname 10.0.1.5
 
Host master* worker*
ProxyCommand ssh bastion -W %h:%p
User ubuntu
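
With the floating IP attached and the security group rules in place, the jump-host setup can be verified from the client container:

ssh bastion true && ssh master1 hostname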

Generate certificates

# Provision the Certificate Authority (CA)
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
 
cat > ca-csr.json << EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# Admin client certificate
cat > admin-csr.json << EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:masters",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare admin
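 
# optional: inspect the subject of the freshly issued client certificate
openssl x509 -in admin.pem -noout -subject -issuer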
WORKER0_HOST=worker1.pool.dev.example.com # Public hostname of your first worker node cloud server
WORKER0_IP=10.0.1.6 # Private IP of your first worker node cloud server
WORKER1_HOST=worker2.pool.dev.example.com # Public hostname of your second worker node cloud server
WORKER1_IP=10.0.1.5 # Private IP of your second worker node cloud server
 
# Kubelet client certificate
cat > ${WORKER0_HOST}-csr.json << EOF
{
  "CN": "system:node:${WORKER0_HOST}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${WORKER0_IP},${WORKER0_HOST} \
  -profile=kubernetes \
  ${WORKER0_HOST}-csr.json | cfssljson -bare ${WORKER0_HOST}
 
cat > ${WORKER1_HOST}-csr.json << EOF
{
  "CN": "system:node:${WORKER1_HOST}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${WORKER1_IP},${WORKER1_HOST} \
  -profile=kubernetes \
  ${WORKER1_HOST}-csr.json | cfssljson -bare ${WORKER1_HOST}
 
# Kube Controller Manager client cert
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
 
# Kube Proxy client certificate
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:node-proxier",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-proxy-csr.json | cfssljson -bare kube-proxy
 
# Kube Scheduler client certificate
cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-scheduler-csr.json | cfssljson -bare kube-scheduler
 
# Kubernetes API server certificate
CERT_HOSTNAME=10.32.0.1,10.0.1.13,master1.pool.dev.example.com,10.0.1.8,master2.pool.dev.example.com,10.0.1.4,lb.pool.dev.example.com,127.0.0.1,localhost,kubernetes.default
 
cat > kubernetes-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${CERT_HOSTNAME} \
  -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes
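 
# optional: confirm every name/IP from CERT_HOSTNAME made it into the SAN list
openssl x509 -in kubernetes.pem -noout -text | grep -A1 'Subject Alternative Name'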
 
# Service account certificate
cat > service-account-csr.json << EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF
 
cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  service-account-csr.json | cfssljson -bare service-account
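 
# at this point (assuming everything was generated in one directory) there should be
# a cert/key pair for the CA, admin, both workers, kube-controller-manager, kube-proxy,
# kube-scheduler, the API server and the service accounts
ls -1 *.pem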

Copy certificates to nodes

# copy certificates to the worker nodes:
scp ca.pem worker1.pool.dev.example.com.pem worker2.pool.dev.example.com.pem worker1:~/
scp ca.pem worker1.pool.dev.example.com.pem worker2.pool.dev.example.com.pem worker2:~/
 
# copy certificates to the controller nodes:
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem service-account-key.pem service-account.pem master1:~/
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem service-account-key.pem service-account.pem master2:~/

Generate kubeconfig

KUBERNETES_ADDRESS=10.0.1.4 # internal IP of the lb node, used as the API endpoint by the workers
 
# Generate Kubelet Kubeconfigs for worker nodes
for instance in worker1.pool.dev.example.com worker2.pool.dev.example.com; do
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://${KUBERNETES_ADDRESS}:6443 \
    --kubeconfig=${instance}.kubeconfig
 
  kubectl config set-credentials system:node:${instance} \
    --client-certificate=${instance}.pem \
    --client-key=${instance}-key.pem \
    --embed-certs=true \
    --kubeconfig=${instance}.kubeconfig
 
  kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=system:node:${instance} \
    --kubeconfig=${instance}.kubeconfig
 
  kubectl config use-context default --kubeconfig=${instance}.kubeconfig
done
 
# Generate a Kube-Proxy Kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://${KUBERNETES_ADDRESS}:6443 \
    --kubeconfig=kube-proxy.kubeconfig
 
kubectl config set-credentials system:kube-proxy \
    --client-certificate=kube-proxy.pem \
    --client-key=kube-proxy-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-proxy.kubeconfig
 
kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=system:kube-proxy \
    --kubeconfig=kube-proxy.kubeconfig
 
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
 
# Generate a Kube-Controller-Manager Kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=kube-controller-manager.kubeconfig
 
kubectl config set-credentials system:kube-controller-manager \
    --client-certificate=kube-controller-manager.pem \
    --client-key=kube-controller-manager-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-controller-manager.kubeconfig
 
kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=system:kube-controller-manager \
    --kubeconfig=kube-controller-manager.kubeconfig
 
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
 
# Generate a Kube-Scheduler Kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=kube-scheduler.kubeconfig
 
kubectl config set-credentials system:kube-scheduler \
    --client-certificate=kube-scheduler.pem \
    --client-key=kube-scheduler-key.pem \
    --embed-certs=true \
    --kubeconfig=kube-scheduler.kubeconfig
 
kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=system:kube-scheduler \
    --kubeconfig=kube-scheduler.kubeconfig
 
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
 
# Generate an Admin Kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://127.0.0.1:6443 \
    --kubeconfig=admin.kubeconfig
 
kubectl config set-credentials admin \
    --client-certificate=admin.pem \
    --client-key=admin-key.pem \
    --embed-certs=true \
    --kubeconfig=admin.kubeconfig
 
kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=admin \
    --kubeconfig=admin.kubeconfig
 
kubectl config use-context default --kubeconfig=admin.kubeconfig
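 
# optional: sanity-check one of the generated kubeconfigs
kubectl config view --kubeconfig=admin.kubeconfig --minify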

Distribute kubeconfig

scp worker1.pool.dev.example.com.kubeconfig worker1.pool.dev.example.com:~/
scp worker2.pool.dev.example.com.kubeconfig worker2.pool.dev.example.com:~/
scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig master1:~/
scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig master2:~/

Generate encryption key

ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
 
cat > encryption-config.yaml << EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
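 
# aescbc expects a 32-byte key, so this should print 32
echo -n "${ENCRYPTION_KEY}" | base64 -d | wc -c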
 
scp encryption-config.yaml master1:~/
scp encryption-config.yaml master2:~/

OPTIONAL: (re)copy required files to control nodes

scp admin.kubeconfig ca.pem ca-key.pem encryption-config.yaml kube-controller-manager.kubeconfig kubernetes-key.pem kubernetes.pem kube-scheduler.kubeconfig service-account-key.pem service-account.pem master1:
scp admin.kubeconfig ca.pem ca-key.pem encryption-config.yaml kube-controller-manager.kubeconfig kubernetes-key.pem kubernetes.pem kube-scheduler.kubeconfig service-account-key.pem service-account.pem master2:

Etcd
https://github.com/etcd-io/etcd/releases/

# install etcd
URL=https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz
wget -q ${URL} -P /tmp
tar xzf /tmp/etcd-v3.4.13-linux-amd64.tar.gz -C /tmp
sudo cp /tmp/etcd-v*-linux-amd64/etcd* /usr/local/bin/
 
# configure etcd
sudo mkdir -p /etc/etcd /var/lib/etcd
sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/
 
# on first node
# CONTROLLER_0_INTERNAL_IP
 
ETCD_NAME_1=$(hostname -f)
INTERNAL_IP_1=$(hostname -i)
ETCD_NAME_2=$(echo ${ETCD_NAME_1//1/2})
INTERNAL_IP_2=$(dig +short A ${ETCD_NAME_2})
INITIAL_CLUSTER=${ETCD_NAME_1}=https://${INTERNAL_IP_1}:2380,${ETCD_NAME_2}=https://${INTERNAL_IP_2}:2380
echo ${INITIAL_CLUSTER}
 
# on second node
# CONTROLLER_1_INTERNAL_IP
ETCD_NAME_1=$(hostname -f)
INTERNAL_IP_1=$(hostname -i)
ETCD_NAME_2=$(echo ${ETCD_NAME_1//2/1})
INTERNAL_IP_2=$(dig +short A ${ETCD_NAME_2})
INITIAL_CLUSTER=${ETCD_NAME_1}=https://${INTERNAL_IP_1}:2380,${ETCD_NAME_2}=https://${INTERNAL_IP_2}:2380
 
# on both nodes
cat << EOF | sudo tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
 
[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME_1} \\
  --cert-file=/etc/etcd/kubernetes.pem \\
  --key-file=/etc/etcd/kubernetes-key.pem \\
  --peer-cert-file=/etc/etcd/kubernetes.pem \\
  --peer-key-file=/etc/etcd/kubernetes-key.pem \\
  --trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP_1}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP_1}:2380 \\
  --listen-client-urls https://${INTERNAL_IP_1}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP_1}:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target
EOF
 
sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
sudo systemctl status etcd
 
sudo ETCDCTL_API=3 etcdctl member list -w table \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem
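 
# once both members have been started, the local endpoint should also report healthy
sudo ETCDCTL_API=3 etcdctl endpoint health \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem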

Install Kubernetes control plane binaries on both control nodes

ssh master1
ssh master2
 
sudo mkdir -p /etc/kubernetes/config
 
wget -q \
  "https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kube-apiserver" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kube-controller-manager" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kube-scheduler"
 
chmod +x kube-apiserver kube-controller-manager kube-scheduler
 
sudo mv kube-apiserver kube-controller-manager kube-scheduler /usr/local/bin/
 
# https://kubernetes.io/docs/tasks/tools/install-kubectl/
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
 
# kubectl version --client

Configure the kube-apiserver systemd service

sudo mkdir -p /var/lib/kubernetes/
 
sudo cp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
  service-account-key.pem service-account.pem \
  encryption-config.yaml /var/lib/kubernetes/
 
 
#INTERNAL_IP=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
INTERNAL_IP=$(hostname -i)
CONTROLLER0_IP=$(dig +short A master1.$(hostname -d))
CONTROLLER1_IP=$(dig +short A master2.$(hostname -d))
 
cat << EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
  --advertise-address=${INTERNAL_IP} \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/audit.log \\
  --authorization-mode=Node,RBAC \\
  --bind-address=0.0.0.0 \\
  --client-ca-file=/var/lib/kubernetes/ca.pem \\
  --enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --enable-swagger-ui=true \\
  --etcd-cafile=/var/lib/kubernetes/ca.pem \\
  --etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
  --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
  --etcd-servers=https://$CONTROLLER0_IP:2379,https://$CONTROLLER1_IP:2379 \\
  --event-ttl=1h \\
  --experimental-encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
  --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
  --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
  --kubelet-https=true \\
  --runtime-config=api/all \\
  --service-account-key-file=/var/lib/kubernetes/service-account.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
  --v=2 \\
  --kubelet-preferred-address-types=InternalIP,InternalDNS,Hostname,ExternalIP,ExternalDNS
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target
EOF

Configure Kubernetes Controller Manager

sudo cp kube-controller-manager.kubeconfig /var/lib/kubernetes/
 
cat << EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
 
[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
  --address=0.0.0.0 \\
  --cluster-cidr=10.200.0.0/16 \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
  --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --leader-elect=true \\
  --root-ca-file=/var/lib/kubernetes/ca.pem \\
  --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --use-service-account-credentials=true \\
  --v=2
Restart=on-failure
RestartSec=5
 
[Install]
WantedBy=multi-user.target
EOF