Links
https://github.com/kelseyhightower/kubernetes-the-hard-way
Configure OpenStack application credentials
mkdir -p ~/.config/openstack

cat << EOF > ~/.config/openstack/clouds.yaml
clouds:
  dev-foo:
    auth_type: "v3applicationcredential"
    auth:
      auth_url: https://keystone.service.dev.example.com/v3
      application_credential_id: "YOUR_CREDENTIAL_ID"
      application_credential_secret: "YOUR_CREDENTIAL_PASS"
EOF
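A quick way to confirm the credentials work is to ask Keystone for a token (this assumes the python-openstackclient package is installed; the cloud name matches the dev-foo entry above):

# should return a token and a catalog listing, not an authentication error
openstack --os-cloud dev-foo token issue
openstack --os-cloud dev-foo network list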
Install Terraform
cat << EOF > /tmp/install-terraform.yml
---
- hosts: localhost
  tasks:
    - name: Get latest Terraform version
      uri:
        url: https://checkpoint-api.hashicorp.com/v1/check/terraform
      register: response

    - set_fact:
        terraform_download_url: "{{ response.json.current_download_url }}"
        terraform_version: "{{ response.json.current_version }}"

    - name: Download Terraform {{ terraform_version }}
      unarchive:
        src: "{{ terraform_download_url }}terraform_{{ terraform_version }}_{{ ansible_system | lower }}_amd64.zip"
        remote_src: yes
        dest: ~/bin
        creates: ~/bin/terraform
        mode: 0550
EOF

ansible-playbook /tmp/install-terraform.yml
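A minimal sanity check after the playbook run (this assumes ~/bin is already on your PATH):

# confirm the binary is installed and resolvable
which terraform
terraform version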
Create a test environment on OpenStack
# terraform.tf
provider "openstack" {
  cloud = "dev-foo"
}

resource "openstack_networking_network_v2" "network_1" {
  name = "foo-net"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
  name       = "foo-subnet"
  network_id = openstack_networking_network_v2.network_1.id
  cidr       = "10.0.1.0/24"
}

resource "openstack_networking_router_v2" "router_1" {
  name                = "foo-router"
  external_network_id = "88934cac-8d55-40d5-8ff9-bde65011741d"
}

resource "openstack_networking_router_interface_v2" "terraform" {
  router_id = openstack_networking_router_v2.router_1.id
  subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

resource "openstack_compute_keypair_v2" "keypair_1" {
  name       = "foo-key"
  public_key = file("~/.ssh/id_rsa.pub")
}

variable "rule_cidr_list" {
  default = [
    "212.10.11.12/32",
    "10.0.1.0/24"
  ]
}

# variable "rule_cidr_port" {
#   default = [
#     22,
#     80,
#     443
#   ]
# }

resource "openstack_compute_secgroup_v2" "secgroup_1" {
  name        = "foo-sec"
  description = "foo-sec"

  rule {
    from_port   = -1
    to_port     = -1
    ip_protocol = "icmp"
    cidr        = "0.0.0.0/0"
  }

  dynamic "rule" {
    for_each = var.rule_cidr_list
    content {
      from_port   = 22
      to_port     = 22
      ip_protocol = "tcp"
      cidr        = rule.value
    }
  }

  dynamic "rule" {
    for_each = var.rule_cidr_list
    content {
      from_port   = 80
      to_port     = 80
      ip_protocol = "tcp"
      cidr        = rule.value
    }
  }

  dynamic "rule" {
    for_each = var.rule_cidr_list
    content {
      from_port   = 443
      to_port     = 443
      ip_protocol = "tcp"
      cidr        = rule.value
    }
  }
}

resource "openstack_compute_instance_v2" "bastion" {
  name            = "foo-bastion"
  image_name      = "CirrOS 0.5.1"
  flavor_name     = "m1.tiny"
  key_pair        = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]

  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}

resource "openstack_networking_floatingip_v2" "fip_1" {
  pool = "public"
}

resource "openstack_compute_floatingip_associate_v2" "fip_1" {
  floating_ip = openstack_networking_floatingip_v2.fip_1.address
  instance_id = openstack_compute_instance_v2.bastion.id
}

# k8s
resource "openstack_compute_instance_v2" "master" {
  name            = "master${count.index + 1}"
  count           = 2
  image_name      = "Ubuntu 20.04"
  flavor_name     = "m1.small"
  key_pair        = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]

  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}

resource "openstack_compute_instance_v2" "worker" {
  name            = "worker${count.index + 1}"
  count           = 2
  image_name      = "Ubuntu 20.04"
  flavor_name     = "m1.medium"
  key_pair        = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]

  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}

resource "openstack_compute_instance_v2" "lb" {
  name            = "lb"
  image_name      = "Ubuntu 20.04"
  flavor_name     = "m1.micro"
  key_pair        = openstack_compute_keypair_v2.keypair_1.name
  security_groups = [openstack_compute_secgroup_v2.secgroup_1.name]

  network {
    uuid = openstack_networking_network_v2.network_1.id
  }
}
Deploy the environment with Terraform
terraform 0.13upgrade -yes .
terraform init
#terraform plan
terraform apply -auto-approve
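Once the apply finishes it helps to check what was actually created before moving on; a minimal sketch (resource names are the ones from terraform.tf above, and the floating IP is the bastion address used in the SSH config later):

# list everything Terraform now manages
terraform state list
# show the bastion floating IP (exact attribute name may vary by provider version)
terraform state show openstack_networking_floatingip_v2.fip_1 | grep address
# or ask OpenStack directly
openstack --os-cloud dev-foo server list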
Disable firewall on all VMs
sudo ufw disable
Create client LXD container
lxc launch ubuntu:20.04 k8s-client
lxc file push ~/.ssh/id_rsa* k8s-client/root/.ssh/
lxc shell k8s-client

sudo apt update && sudo apt dist-upgrade
Install tools on the Kubernetes VMs
sudo apt update
sudo apt install -y curl net-tools
Install cfssl
sudo apt install golang-cfssl
cfssl version
Install kubectl
https://kubernetes.io/docs/tasks/tools/install-kubectl/
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
kubectl version --client
/etc/ssh/ssh_config.d/k8s-hard.conf
Host bastion
    Hostname 217.11.12.13
    User cirros

Host master1
    Hostname 10.0.1.10

Host master2
    Hostname 10.0.1.8

Host worker1
    Hostname 10.0.1.13

Host worker2
    Hostname 10.0.1.5

Host master* worker*
    ProxyCommand ssh bastion -W %h:%p
    User ubuntu
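With the jump-host config in place, a quick loop confirms every node is reachable through the bastion before any certificates are generated:

# should print each node's FQDN via the bastion ProxyCommand
for h in master1 master2 worker1 worker2; do
  ssh -o ConnectTimeout=5 $h hostname -f
done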
Generate certificates
# Provision the Certificate Authority (CA)
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
  "CN": "Kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "CA",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca
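Before signing anything with it, it is worth inspecting the CA certificate that cfssl produced (openssl is assumed to be available in the client container):

# check the CA subject and validity window, and that the key material exists
openssl x509 -in ca.pem -noout -subject -issuer -dates
ls -l ca.pem ca-key.pem ca.csr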
# Admin client certificate
cat > admin-csr.json << EOF
{
  "CN": "admin",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:masters",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  admin-csr.json | cfssljson -bare admin
WORKER0_HOST=worker1.pool.dev.example.com   # Public hostname of your first worker node cloud server
WORKER0_IP=10.0.1.6                         # Private IP of your first worker node cloud server
WORKER1_HOST=worker2.pool.dev.example.com   # Public hostname of your second worker node cloud server
WORKER1_IP=10.0.1.5                         # Private IP of your second worker node cloud server

# Kubelet client certificates
cat > ${WORKER0_HOST}-csr.json << EOF
{
  "CN": "system:node:${WORKER0_HOST}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${WORKER0_IP},${WORKER0_HOST} \
  -profile=kubernetes \
  ${WORKER0_HOST}-csr.json | cfssljson -bare ${WORKER0_HOST}

cat > ${WORKER1_HOST}-csr.json << EOF
{
  "CN": "system:node:${WORKER1_HOST}",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:nodes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${WORKER1_IP},${WORKER1_HOST} \
  -profile=kubernetes \
  ${WORKER1_HOST}-csr.json | cfssljson -bare ${WORKER1_HOST}

# Kube Controller Manager client certificate
cat > kube-controller-manager-csr.json << EOF
{
  "CN": "system:kube-controller-manager",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager

# Kube Proxy client certificate
cat > kube-proxy-csr.json << EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:node-proxier",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-proxy-csr.json | cfssljson -bare kube-proxy

# Kube Scheduler client certificate
cat > kube-scheduler-csr.json << EOF
{
  "CN": "system:kube-scheduler",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  kube-scheduler-csr.json | cfssljson -bare kube-scheduler

# Kubernetes API server certificate
CERT_HOSTNAME=10.32.0.1,10.0.1.13,master1.pool.dev.example.com,10.0.1.8,master2,10.0.1.4,lb.pool.dev.example.com,127.0.0.1,localhost,kubernetes.default

cat > kubernetes-csr.json << EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -hostname=${CERT_HOSTNAME} \
  -profile=kubernetes \
  kubernetes-csr.json | cfssljson -bare kubernetes

# Service account certificate
cat > service-account-csr.json << EOF
{
  "CN": "service-accounts",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Portland",
      "O": "Kubernetes",
      "OU": "Kubernetes The Hard Way",
      "ST": "Oregon"
    }
  ]
}
EOF

cfssl gencert \
  -ca=ca.pem \
  -ca-key=ca-key.pem \
  -config=ca-config.json \
  -profile=kubernetes \
  service-account-csr.json | cfssljson -bare service-account
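A sanity check on the API server certificate catches a missing SAN before anything is deployed; the names to look for are the ones passed via CERT_HOSTNAME above:

# the Subject Alternative Name list should include the LB, both masters, localhost and 10.32.0.1
openssl x509 -in kubernetes.pem -noout -text | grep -A1 "Subject Alternative Name"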
Copy certificates to nodes
# copy certificates to the worker nodes
# (the kubeconfig files referenced here are generated in the next section):
#scp ca.pem worker1.pool.dev.example.com.pem worker2.pool.dev.example.com.pem worker1:~/
#scp ca.pem worker1.pool.dev.example.com.pem worker2.pool.dev.example.com.pem worker2:~/

scp worker1.pool.dev.example.com.pem        worker1:~/worker1.pem
scp worker1.pool.dev.example.com-key.pem    worker1:~/worker1-key.pem
scp worker1.pool.dev.example.com.kubeconfig worker1:~/worker1.kubeconfig
scp ca.pem                                  worker1:~/
scp kube-proxy.kubeconfig                   worker1:~/

scp worker2.pool.dev.example.com.pem        worker2:~/worker2.pem
scp worker2.pool.dev.example.com-key.pem    worker2:~/worker2-key.pem
scp worker2.pool.dev.example.com.kubeconfig worker2:~/worker2.kubeconfig
scp ca.pem                                  worker2:~/
scp kube-proxy.kubeconfig                   worker2:~/

# copy certificates to the controller nodes:
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem service-account-key.pem service-account.pem master1:~/
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem service-account-key.pem service-account.pem master2:~/
Generate kubeconfig
KUBERNETES_ADDRESS=10.0.1.4

# Generate kubelet kubeconfigs for the worker nodes
for instance in worker1.pool.dev.example.com worker2.pool.dev.example.com; do
  kubectl config set-cluster kubernetes-the-hard-way \
    --certificate-authority=ca.pem \
    --embed-certs=true \
    --server=https://${KUBERNETES_ADDRESS}:6443 \
    --kubeconfig=${instance}.kubeconfig

  kubectl config set-credentials system:node:${instance} \
    --client-certificate=${instance}.pem \
    --client-key=${instance}-key.pem \
    --embed-certs=true \
    --kubeconfig=${instance}.kubeconfig

  kubectl config set-context default \
    --cluster=kubernetes-the-hard-way \
    --user=system:node:${instance} \
    --kubeconfig=${instance}.kubeconfig

  kubectl config use-context default --kubeconfig=${instance}.kubeconfig
done

# Generate a kube-proxy kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://${KUBERNETES_ADDRESS}:6443 \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials system:kube-proxy \
  --client-certificate=kube-proxy.pem \
  --client-key=kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# Generate a kube-controller-manager kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig

# Generate a kube-scheduler kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem \
  --client-key=kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig

# Generate an admin kubeconfig
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=admin.kubeconfig

kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=admin \
  --kubeconfig=admin.kubeconfig

kubectl config use-context default --kubeconfig=admin.kubeconfig
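A quick look at one of the generated files confirms the cluster endpoint and the embedded certificate data are in place (any of the kubeconfigs can be checked the same way):

# the server should be the load balancer address; certs show up as DATA+OMITTED/REDACTED
kubectl config view --kubeconfig=kube-proxy.kubeconfig
kubectl config view --kubeconfig=admin.kubeconfig --minify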
Distribute kubeconfig
scp worker1.pool.dev.example.com.kubeconfig worker1.pool.dev.example.com:~/
scp worker2.pool.dev.example.com.kubeconfig worker2.pool.dev.example.com:~/

scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig master1:~/
scp admin.kubeconfig kube-controller-manager.kubeconfig kube-scheduler.kubeconfig master2:~/
Generate encryption key
ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)

cat > encryption-config.yaml << EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF

scp encryption-config.yaml master1:~/
scp encryption-config.yaml master2:~/
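The aescbc provider expects a 32-byte key; a quick check that the generated value decodes back to 32 bytes avoids a hard-to-debug API server start failure later:

# should print 32
echo -n "${ENCRYPTION_KEY}" | base64 -d | wc -c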
OPTIONAL: (re)copy required files to control nodes
scp admin.kubeconfig ca.pem ca-key.pem encryption-config.yaml kube-controller-manager.kubeconfig kubernetes-key.pem kubernetes.pem kube-scheduler.kubeconfig service-account-key.pem service-account.pem master1:
Etcd
https://github.com/etcd-io/etcd/releases/
# install etcd
URL=https://github.com/etcd-io/etcd/releases/download/v3.4.13/etcd-v3.4.13-linux-amd64.tar.gz
wget -q ${URL} -P /tmp
tar xzf /tmp/etcd-v3.4.13-linux-amd64.tar.gz -C /tmp
sudo cp /tmp/etcd-v*-linux-amd64/etcd* /usr/local/bin/

# configure etcd
sudo mkdir -p /etc/etcd /var/lib/etcd
sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/

# on the first node
# CONTROLLER_0_INTERNAL_IP
ETCD_NAME_1=$(hostname -f)
INTERNAL_IP_1=$(hostname -i)
ETCD_NAME_2=$(echo ${ETCD_NAME_1//1/2})
INTERNAL_IP_2=$(dig +short A ${ETCD_NAME_2})
INITIAL_CLUSTER=${ETCD_NAME_1}=https://${INTERNAL_IP_1}:2380,${ETCD_NAME_2}=https://${INTERNAL_IP_2}:2380
echo ${INITIAL_CLUSTER}

# on the second node
# CONTROLLER_1_INTERNAL_IP
ETCD_NAME_1=$(hostname -f)
INTERNAL_IP_1=$(hostname -i)
ETCD_NAME_2=$(echo ${ETCD_NAME_1//2/1})
INTERNAL_IP_2=$(dig +short A ${ETCD_NAME_2})
INITIAL_CLUSTER=${ETCD_NAME_1}=https://${INTERNAL_IP_1}:2380,${ETCD_NAME_2}=https://${INTERNAL_IP_2}:2380

# on both nodes
cat << EOF | sudo tee /etc/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos

[Service]
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME_1} \\
  --cert-file=/etc/etcd/kubernetes.pem \\
  --key-file=/etc/etcd/kubernetes-key.pem \\
  --peer-cert-file=/etc/etcd/kubernetes.pem \\
  --peer-key-file=/etc/etcd/kubernetes-key.pem \\
  --trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-trusted-ca-file=/etc/etcd/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --initial-advertise-peer-urls https://${INTERNAL_IP_1}:2380 \\
  --listen-peer-urls https://${INTERNAL_IP_1}:2380 \\
  --listen-client-urls https://${INTERNAL_IP_1}:2379,https://127.0.0.1:2379 \\
  --advertise-client-urls https://${INTERNAL_IP_1}:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster ${INITIAL_CLUSTER} \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable etcd
sudo systemctl start etcd
sudo systemctl status etcd

sudo ETCDCTL_API=3 etcdctl member list -w table \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem
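Once etcd is running on both controllers, an endpoint health check confirms the cluster actually formed; the member list only shows registration, not health. A minimal sketch using the same client certificate as above:

# --cluster checks every member from the membership list, not just the local endpoint
sudo ETCDCTL_API=3 etcdctl endpoint health --cluster \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem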
Installing Kubernetes Control Plane Binaries on both control nodes
# log in to each controller in turn and run the steps below
ssh master1
ssh master2

sudo mkdir -p /etc/kubernetes/config

wget -q \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-apiserver" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-controller-manager" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-scheduler" \
  "https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kubectl"

# alternative download URL:
# https://downloadkubernetes.com/

chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl
sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/

# DEPRECATED
# https://kubernetes.io/docs/tasks/tools/install-kubectl/
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
# kubectl version --client
Configure kube-api systemd
sudo mkdir -p /var/lib/kubernetes/

sudo cp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
  service-account-key.pem service-account.pem \
  encryption-config.yaml /var/lib/kubernetes/

#INTERNAL_IP=$(curl http://169.254.169.254/latest/meta-data/local-ipv4)
INTERNAL_IP=$(hostname -i)
CONTROLLER0_IP=$(dig +short A master1.$(hostname -d))
CONTROLLER1_IP=$(dig +short A master2.$(hostname -d))

cat << EOF | sudo tee /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
  --advertise-address=${INTERNAL_IP} \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/audit.log \\
  --authorization-mode=Node,RBAC \\
  --bind-address=0.0.0.0 \\
  --client-ca-file=/var/lib/kubernetes/ca.pem \\
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --etcd-cafile=/var/lib/kubernetes/ca.pem \\
  --etcd-certfile=/var/lib/kubernetes/kubernetes.pem \\
  --etcd-keyfile=/var/lib/kubernetes/kubernetes-key.pem \\
  --etcd-servers=https://$CONTROLLER0_IP:2379,https://$CONTROLLER1_IP:2379 \\
  --event-ttl=1h \\
  --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
  --kubelet-certificate-authority=/var/lib/kubernetes/ca.pem \\
  --kubelet-client-certificate=/var/lib/kubernetes/kubernetes.pem \\
  --kubelet-client-key=/var/lib/kubernetes/kubernetes-key.pem \\
  --kubelet-https=true \\
  --runtime-config='api/all=true' \\
  --service-account-key-file=/var/lib/kubernetes/service-account.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=/var/lib/kubernetes/kubernetes.pem \\
  --tls-private-key-file=/var/lib/kubernetes/kubernetes-key.pem \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
Configure Kubernetes Controller Manager
# the unit below expects the kubeconfig under /var/lib/kubernetes/
sudo cp kube-controller-manager.kubeconfig /var/lib/kubernetes/

cat << EOF | sudo tee /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \\
  --bind-address=0.0.0.0 \\
  --cluster-cidr=10.200.0.0/16 \\
  --cluster-name=kubernetes \\
  --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \\
  --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \\
  --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \\
  --leader-elect=true \\
  --root-ca-file=/var/lib/kubernetes/ca.pem \\
  --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --use-service-account-credentials=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
Configure Kubernetes Scheduler
# the scheduler config below expects its kubeconfig under /var/lib/kubernetes/
sudo cp kube-scheduler.kubeconfig /var/lib/kubernetes/

cat << EOF | sudo tee /etc/kubernetes/config/kube-scheduler.yaml
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
clientConnection:
  kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig"
leaderElection:
  leaderElect: true
EOF

cat << EOF | sudo tee /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \\
  --config=/etc/kubernetes/config/kube-scheduler.yaml \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
Start Kubernetes services
sudo systemctl daemon-reload
sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler
Test Kubernetes services state
sudo systemctl status kube-apiserver kube-controller-manager kube-scheduler
kubectl get componentstatuses --kubeconfig admin.kubeconfig
Enable HTTP Health Checks
sudo apt-get install -y nginx

cat > kubernetes.default.svc.cluster.local << EOF
server {
  listen 80;
  server_name kubernetes.default.svc.cluster.local;

  location /healthz {
    proxy_pass https://127.0.0.1:6443/healthz;
    proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem;
  }
}
EOF

sudo mv kubernetes.default.svc.cluster.local /etc/nginx/sites-available/kubernetes.default.svc.cluster.local
sudo ln -s /etc/nginx/sites-available/kubernetes.default.svc.cluster.local /etc/nginx/sites-enabled/

sudo systemctl restart nginx
sudo systemctl enable nginx
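The proxy can be verified locally on each controller; an HTTP 200 with body "ok" means nginx is reaching the API server's /healthz endpoint over TLS:

curl -i -H "Host: kubernetes.default.svc.cluster.local" http://127.0.0.1/healthz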
RBAC for Kubelet Authorization
cat << EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
EOF

cat << EOF | kubectl apply --kubeconfig admin.kubeconfig -f -
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
EOF
Configure Kubernetes API frontend LB
sudo apt-get install -y nginx
sudo systemctl enable nginx

sudo mkdir -p /etc/nginx/tcpconf.d

# add the include to nginx.conf (or edit it manually: sudo vi /etc/nginx/nginx.conf)
echo "include /etc/nginx/tcpconf.d/*;" | sudo tee -a /etc/nginx/nginx.conf

CONTROLLER0_IP=$(host -t A master1 | cut -d' ' -f4)
CONTROLLER1_IP=$(host -t A master2 | cut -d' ' -f4)

cat << EOF | sudo tee /etc/nginx/tcpconf.d/kubernetes.conf
stream {
  server {
    listen 6443;
    listen 443;
    proxy_pass kubernetes;
  }

  upstream kubernetes {
    server $CONTROLLER0_IP:6443;
    server $CONTROLLER1_IP:6443;
  }
}
EOF

sudo nginx -s reload

# DEBUG
curl -k https://localhost:6443/version
Installing Worker Node Binaries
# disable swap
sudo sed -e '/swap/ s/^#*/#/' -i /etc/fstab
sudo swapoff -a

sudo apt-get -y install socat conntrack ipset

# NEW / TEST
wget -q --show-progress \
  https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.18.0/crictl-v1.18.0-linux-amd64.tar.gz \
  https://storage.googleapis.com/kubernetes-the-hard-way/runsc \
  https://github.com/opencontainers/runc/releases/download/v1.0.0-rc91/runc.amd64 \
  https://github.com/containernetworking/plugins/releases/download/v0.8.6/cni-plugins-linux-amd64-v0.8.6.tgz \
  https://github.com/containerd/containerd/releases/download/v1.3.6/containerd-1.3.6-linux-amd64.tar.gz \
  https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kubectl \
  https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kube-proxy \
  https://storage.googleapis.com/kubernetes-release/release/v1.18.6/bin/linux/amd64/kubelet

# OLD versions, kept for reference; do not download both sets,
# otherwise the newer binaries above get overwritten and the tar wildcards below match twice
#wget -q --show-progress \
#  https://github.com/kubernetes-incubator/cri-tools/releases/download/v1.0.0-beta.0/crictl-v1.0.0-beta.0-linux-amd64.tar.gz \
#  https://storage.googleapis.com/kubernetes-the-hard-way/runsc \
#  https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/runc.amd64 \
#  https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz \
#  https://github.com/containerd/containerd/releases/download/v1.1.0/containerd-1.1.0.linux-amd64.tar.gz \
#  https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kubectl \
#  https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kube-proxy \
#  https://storage.googleapis.com/kubernetes-release/release/v1.10.2/bin/linux/amd64/kubelet

sudo mkdir -p \
  /etc/cni/net.d \
  /opt/cni/bin \
  /var/lib/kubelet \
  /var/lib/kube-proxy \
  /var/lib/kubernetes \
  /var/run/kubernetes

chmod +x kubectl kube-proxy kubelet runc.amd64 runsc
sudo mv runc.amd64 runc
sudo mv kubectl kube-proxy kubelet runc runsc /usr/local/bin/
sudo tar -xzf crictl-v*-linux-amd64.tar.gz -C /usr/local/bin/
sudo tar -xzf cni-plugins-linux-amd64-v*.tgz -C /opt/cni/bin/
tar -xzf containerd-*-linux-amd64.tar.gz -C /tmp/
sudo mv /tmp/bin/* /bin/
Configuring Containerd
sudo mkdir -p /etc/containerd/

cat << EOF | sudo tee /etc/containerd/config.toml
[plugins]
  [plugins.cri.containerd]
    snapshotter = "overlayfs"
    [plugins.cri.containerd.default_runtime]
      runtime_type = "io.containerd.runtime.v1.linux"
      runtime_engine = "/usr/local/bin/runc"
      runtime_root = ""
    [plugins.cri.containerd.untrusted_workload_runtime]
      runtime_type = "io.containerd.runtime.v1.linux"
      runtime_engine = "/usr/local/bin/runsc"
      runtime_root = "/run/containerd/runsc"
EOF

cat << EOF | sudo tee /etc/systemd/system/containerd.service
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/bin/containerd
Restart=always
RestartSec=5
Delegate=yes
KillMode=process
OOMScoreAdjust=-999
LimitNOFILE=1048576
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target
EOF
Configuring Kubelet
HOSTNAME=$(hostname)
sudo mv ${HOSTNAME}-key.pem ${HOSTNAME}.pem /var/lib/kubelet/
sudo mv ${HOSTNAME}.kubeconfig /var/lib/kubelet/kubeconfig
sudo mv ca.pem /var/lib/kubernetes/

cat << EOF | sudo tee /var/lib/kubelet/kubelet-config.yaml
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/var/lib/kubernetes/ca.pem"
authorization:
  mode: Webhook
clusterDomain: "cluster.local"
clusterDNS:
  - "10.32.0.10"
runtimeRequestTimeout: "15m"
tlsCertFile: "/var/lib/kubelet/${HOSTNAME}.pem"
tlsPrivateKeyFile: "/var/lib/kubelet/${HOSTNAME}-key.pem"
EOF

# note: the old --allow-privileged=true flag is dropped here;
# it was removed from recent kubelet releases and is not needed with v1.18 binaries
cat << EOF | sudo tee /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
ExecStart=/usr/local/bin/kubelet \\
  --config=/var/lib/kubelet/kubelet-config.yaml \\
  --container-runtime=remote \\
  --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \\
  --image-pull-progress-deadline=2m \\
  --kubeconfig=/var/lib/kubelet/kubeconfig \\
  --network-plugin=cni \\
  --register-node=true \\
  --v=2 \\
  --hostname-override=$(hostname -f)
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
Configuring Kube-Proxy
sudo mv kube-proxy.kubeconfig /var/lib/kube-proxy/kubeconfig

cat << EOF | sudo tee /var/lib/kube-proxy/kube-proxy-config.yaml
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  kubeconfig: "/var/lib/kube-proxy/kubeconfig"
mode: "iptables"
clusterCIDR: "10.200.0.0/16"
EOF

cat << EOF | sudo tee /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-proxy \\
  --config=/var/lib/kube-proxy/kube-proxy-config.yaml
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl daemon-reload
sudo systemctl enable containerd kubelet kube-proxy
sudo systemctl restart containerd kubelet kube-proxy
sudo systemctl status containerd kubelet kube-proxy

kubectl get nodes
Configuring kubectl for remote access to the Kubernetes cluster
cd ~/kthw

kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --server=https://lb.example.com:6443

kubectl config set-credentials admin \
  --client-certificate=admin.pem \
  --client-key=admin-key.pem

kubectl config set-context kubernetes-the-hard-way \
  --cluster=kubernetes-the-hard-way \
  --user=admin

kubectl config use-context kubernetes-the-hard-way

kubectl get pods
kubectl get nodes
kubectl version
Test Me
# Enable IP forwarding on the worker nodes
sudo sysctl net.ipv4.conf.all.forwarding=1
echo "net.ipv4.conf.all.forwarding=1" | sudo tee -a /etc/sysctl.conf

# ssh -L 6443:localhost:6443 user@<your load balancer cloud server public IP>

# install the Weave Net CNI add-on with the pod CIDR used by the controller manager
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.IPALLOC_RANGE=10.200.0.0/16"

# test the network
kubectl get pods -n kube-system

# Create an nginx deployment with 2 replicas
cat << EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  selector:
    matchLabels:
      run: nginx
  replicas: 2
  template:
    metadata:
      labels:
        run: nginx
    spec:
      containers:
        - name: my-nginx
          image: nginx
          ports:
            - containerPort: 80
EOF

# Create a service for that deployment
kubectl expose deployment/nginx

# Deploy a test container
kubectl run busybox --image=radial/busyboxplus:curl --command -- sleep 3600
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")

# show endpoints
kubectl get ep nginx

# test connections between pods
kubectl exec $POD_NAME -- curl <first nginx pod IP address>
kubectl exec $POD_NAME -- curl <second nginx pod IP address>

# test the service connection
kubectl get svc
kubectl exec $POD_NAME -- curl <nginx service IP address>

# cleanup network test objects
kubectl delete deployment busybox
kubectl delete deployment nginx
kubectl delete svc nginx
Deploying Kube-DNS
# Install kube-dns
kubectl create -f https://storage.googleapis.com/kubernetes-the-hard-way/kube-dns.yaml

# Check the kube-dns pod
kubectl get pods -l k8s-app=kube-dns -n kube-system

# Test kube-dns
kubectl run busybox --image=busybox:1.28 --command -- sleep 3600
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
kubectl exec -ti $POD_NAME -- nslookup kubernetes

# cleanup
kubectl delete deployment busybox
Test deployment
# Create a secret
kubectl create secret generic kubernetes-the-hard-way --from-literal="mykey=mydata"

# Test encryption at rest (on a controller node)
ssh master1
sudo ETCDCTL_API=3 etcdctl get \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem \
  /registry/secrets/default/kubernetes-the-hard-way | hexdump -C

# Create an nginx deployment
kubectl run nginx --image=nginx

# Verify the pod
kubectl get pods -l run=nginx

# get the pod name
POD_NAME=$(kubectl get pods -l run=nginx -o jsonpath="{.items[0].metadata.name}")

# forward local port 8081 to the nginx pod
kubectl port-forward ${POD_NAME} 8081:80

# test the port forward
curl --head http://127.0.0.1:8081

# get logs from the nginx pod
kubectl logs ${POD_NAME}

# run a command inside the nginx pod
kubectl exec -ti $POD_NAME -- nginx -v

# create a NodePort service
kubectl expose deployment nginx --port 80 --type NodePort

# get the port assigned to the service
kubectl get svc

# connect to that port from a worker node
curl -I localhost:<node port>

# cleanup
kubectl delete secret kubernetes-the-hard-way
kubectl delete svc nginx
kubectl delete deployment nginx