OpenShift

apt-get install docker-engine=1.13.1-0~ubuntu-xenial

wget -q --no-check-certificate https://raw.githubusercontent.com/panticz/installit/master/install.openshift.sh -O - | bash -

#!/bin/bash
# Install Docker, git and wget, mark OpenShift's internal registry subnet
# (172.30.0.0/16) as an insecure registry, and install the "oc" client binary.
set -e

. /etc/os-release
# ID_LIKE is only set on derivatives (Ubuntu => "debian"); plain Debian sets
# just ID=debian, so fall back to ID when deciding the package family.
if [[ "${ID_LIKE:-${ID}}" == *debian* ]]; then
    wget -q --no-check-certificate https://raw.githubusercontent.com/panticz/installit/master/install.docker.sh -O - | bash -
    # -y keeps the script non-interactive.
    sudo apt-get install -y git wget
    # Allow the OpenShift registry network in the systemd docker unit.
    sudo sed -i 's|ExecStart=/usr/bin/dockerd -H fd://|ExecStart=/usr/bin/dockerd -H fd:// --insecure-registry 172.30.0.0/16|g' /etc/systemd/system/multi-user.target.wants/docker.service
    # Reloading units requires root, just like the sed above.
    sudo systemctl daemon-reload
else
    sudo yum install -y docker git wget
    sudo sed -i "s|# INSECURE_REGISTRY='--insecure-registry'|INSECURE_REGISTRY='--insecure-registry 172.30.0.0/16'|g" /etc/sysconfig/docker
fi

sudo systemctl enable docker
sudo systemctl start docker

# Fetch the OpenShift Origin client tools and install the "oc" binary.
wget https://github.com/openshift/origin/releases/download/v1.5.0-rc.0/openshift-origin-client-tools-v1.5.0-rc.0-49a4a7a-linux-64bit.tar.gz -qO /tmp/openshift-origin-client-tools-linux-64bit.tar.gz
tar xzf /tmp/openshift-origin-client-tools-linux-64bit.tar.gz -C /tmp/
# /usr/local/bin is root-owned; the copy needs elevation.
sudo cp /tmp/openshift-origin-client-tools-*/oc /usr/local/bin/

rm -rf /tmp/openshift-origin-client-tools*

# create test project
# Bring up a local single-node OpenShift cluster (uncomment the flags to pin
# a public hostname or raise log verbosity).
oc cluster up #--public-hostname=openshift.example.com --loglevel=10
# Log in as the default demo user created by "oc cluster up".
oc login -u developer -p developer
oc whoami
# List available application templates / image streams.
oc new-app -L
oc projects
# Build and deploy the Ruby example app from source (S2I build).
oc new-app openshift/ruby-20-centos7~https://github.com/openshift/ruby-ex
oc get all
# Watch pods until the build and deployment finish (Ctrl-C to stop).
oc get pod -w
# Follow the S2I build log of the first build.
oc logs -f ruby-ex-1-build
oc get services
# Expose the service via a route (uncomment to set an explicit hostname).
oc expose service ruby-ex #--hostname=app1.example.com
oc get route

# Make the generated xip.io route hostname resolvable locally.
echo "192.168.178.31 ruby-ex-myproject.192.168.178.31.xip.io" >> /etc/hosts

Manage OpenShift with Ansible
https://github.com/openshift/openshift-ansible
https://www.ansible.com/blog/2013/09/19/deploying-highly-available-openshift-origin-clusters

# Ansible playbook
https://github.com/panticz/ansible/tree/master/roles/ansible

Links
https://www.openshift.org/
https://github.com/openshift/origin/blob/master/docs/cluster_up_down.md#linux
https://github.com/openshift/origin/releases/latest
https://github.com/OpenShiftDemos/openshift-cd-demo

Rundeck Ansible Plugin

https://github.com/Batix/rundeck-ansible-plugin

https://github.com/Batix/rundeck-ansible-plugin/releases/latest

wget https://github.com/Batix/rundeck-ansible-plugin/releases/download/2.0.2/ansible-plugin-2.0.2.jar -P /var/lib/rundeck/libext/

* Create a new project
http://<rundeck-host>:4440/resources/createProject

Project Name: Ansible
Default Node Executor: "Ansible Ad-Hoc Node Executor"
Executable: /bin/bash
SSH Connection > SSH Authentication: privateKey

Default Node File Copier
SSH Connection: privateKey

apt-get install ansible

Rundeck (Job scheduler and Runbook automation)

ansible-playbook rundeck-ansible.example.com.yml -i your_inventory

---
# Playbook: install Rundeck plus the Ansible plugin and redirect the web UI
# from port 4440 to 80. A play must be a list item, hence the leading "-".
- hosts: rundeck-ansible.example.com
  vars:
    port_redirect:
      from: 4440
      to: 80
    install_ansible_plugin: true
  roles:
    - ansible
    - rundeck

Ansible role
https://github.com/panticz/ansible/tree/master/roles/rundeck
https://github.com/panticz/ansible/blob/master/rundeck.example.com.yml

# install
# Download the Rundeck 2.7.1 Debian package and install it, then start the
# service via its SysV init script.
# NOTE(review): dl.bintray.com has since been shut down — the package may need
# to be fetched from Rundeck's current download location instead.
wget http://dl.bintray.com/rundeck/rundeck-deb/rundeck-2.7.1-1-GA.deb -P /tmp
sudo dpkg -i /tmp/rundeck-2.7.1-1-GA.deb
sudo /etc/init.d/rundeckd start

# Rundeck CLI
sudo apt-get install rundeck-cli
https://rundeck.github.io/rundeck-cli/

# Admin
http://localhost:4440/user/login;jsessionid=16lnsilhg9n6x38a090agwe8y
admin/admin

# configuration (global)
/etc/rundeck/framework.properties

# job database
/var/lib/rundeck/data/rundeckdb.mv.db

# add user
echo "foo:bar,user,devops" >> /etc/rundeck/realm.properties

# acl
/var/rundeck/projects/<project>/acls/<name>.aclpolicy

Configuration (project)
# configure nodes
/var/rundeck/projects/<project>/etc/resources.xml

# use native ssh agent to access host behind proxy / bastion
/var/rundeck/projects/<project>/etc/project.properties
plugin.script-exec.default.command=/usr/bin/ssh ${node.username}@${node.hostname} ${exec.command}
plugin.script-copy.default.command=/usr/bin/scp ${file-copy.file} ${node.username}@${node.hostname}\:${file-copy.destination}

Email notification
http://rundeck.org/docs/administration/email-settings.html

# /etc/rundeck/rundeck-config.properties
grails.mail.host=smtp.example.com
grails.mail.port=25
grails.mail.username=foo
grails.mail.password=bar

# restart service
service rundeckd restart

# ssh
# Prepare the rundeck service account's SSH key store: a private directory
# and an (initially empty) id_rsa file, both owned by rundeck and restricted
# to owner-only access.
SSH_DIR="/var/lib/rundeck/.ssh"
mkdir "${SSH_DIR}"
chown rundeck:rundeck "${SSH_DIR}"
chmod 700 "${SSH_DIR}"
touch "${SSH_DIR}/id_rsa"
chown rundeck:rundeck "${SSH_DIR}/id_rsa"
chmod 600 "${SSH_DIR}/id_rsa"

# log
tail -f /var/log/rundeck/*.log

# Documentation
http://rundeck.org/
http://rundeck.org/docs/manual/getting-started.html
http://rundeck.org/2.3.2/administration/configuring-ssl.html

# Plugins
http://rundeck.org/plugins/ansible/2016/03/11/ansible-plugin.html
http://rundeck.org/plugins/2013/01/01/jenkins-rundeck.html
http://rundeck.org/plugins/2013/01/01/aws-ec2-nodes.html

# Docker
https://github.com/x110dc/docker-rundeck

Change default admin password
# http://rundeck.org/2.3.2/administration/authenticating-users.html
# Generate a random admin password and store its MD5 hash in realm.properties.
RD_PASS=$(openssl rand -base64 16)
echo "${RD_PASS}"
# Jetty's Password tool prints a line of the form "MD5:<hex>"; strip the
# "MD5:" prefix here because the sed below adds it back. The original kept
# the prefix, producing a broken "admin:MD5:MD5:<hex>" entry.
RD_PASS_MD5=$(java -cp /var/lib/rundeck/bootstrap/jetty-all-9.0.7.v20131107.jar org.eclipse.jetty.util.security.Password admin "${RD_PASS}" 2>&1 | grep MD5 | sed 's/^MD5://')
sed -i "s/^admin:admin/admin:MD5:${RD_PASS_MD5}/g" /etc/rundeck/realm.properties
service rundeckd restart

# echo "framework.server.password = MD5:${RD_PASS_MD5}" >> /etc/rundeck/framework.properties

# Notify icinga
Local Command:
ssh monitoring.example.com '/usr/bin/printf "[%lu] SCHEDULE_FORCED_SVC_CHECK;%s;%s;%s\n" $(date +%s) ${node.name} APT $(date +%s) | tee -a /var/lib/icinga/rw/icinga.cmd'

# User authentication
http://rundeck.org/docs/administration/authenticating-users.html

Changelog
http://rundeck.org/docs/history/changelog.html

Rundeck jobs and scripts repository
https://github.com/panticz/rundeck

Job options
http://rundeck.org/2.0.0/manual/job-options.html

Pipe command
echo ${option.RSA} | tee /tmp/debug.txt

Ansible snippets

# show user and host
- debug:
    msg="{{ ansible_user_id }}@{{ inventory_hostname }}"
 
- debug:
    msg: "{{ ansible_default_ipv4['address'] }}"
 
- name: set default value
  debug:
    msg: "{{ foo | default('bar') }}"
 
# check if directory exists
# The original snippet referenced "check_path" without ever registering it,
# and ended with a bare "when" (invalid YAML). A stat task is added and the
# dangling keyword removed.
- name: Check whether the configuration directory exists
  stat:
    path: /etc/icinga/objects
  register: check_path

- block:
    - name: Remove default configuration
      file:
        state: absent
        path: /etc/icinga/objects
      # "not x" is the idiomatic form of "x == false".
      when: not check_path.stat.exists

    - name: Deploy configuration
      git:
        repo: git@git.example.com:foo/icinga.git
        dest: /etc/icinga/objects
        accept_hostkey: yes
      notify: icinga restart

Debian / Ubuntu mass dist-upgrade with Ansible (with fallback and logging)

ansible-playbook dist-upgrade.yml -i your_inventory [-l host_name]

---
# Mass dist-upgrade play with a bootstrap fallback for hosts that lack
# Python, plus per-host logging of the apt output on the control node.
- hosts:
    all
  # Facts are gathered manually below so the raw-bootstrap rescue can run first.
  gather_facts: no
  vars:
    verbose: false
    log_dir: "log/dist-upgrade/{{ inventory_hostname }}"
  pre_tasks:
    - block:
        - setup:
      rescue:
        # setup failed => no Python on the target; install it via raw and retry.
        - name: "Install required python-minimal package"
          raw: "apt-get update && apt-get install -y --force-yes python-apt python-minimal"
        - setup:
  tasks:
    - name: Update packages
      apt:
        update_cache: yes
        upgrade: dist
        autoremove: yes
      register: output

    # Flag the host as changed unless apt reported nothing to do.
    # NOTE(review): "search" as a Jinja filter was removed in Ansible 2.9;
    # newer versions need "output.stdout is not search(...)".
    - name: Check changes
      set_fact:
        updated: true
      when: not output.stdout | search("0 upgraded, 0 newly installed")

    - name: Display changes
      debug:
        msg: "{{ output.stdout_lines }}"
      when: verbose or updated is defined

    - block:
      - name: "Create log directory"
        file:
          path: "{{ log_dir }}"
          state: directory
        changed_when: false

      - name: "Write changes to logfile"
        copy:
          content: "{{ output.stdout }}"
          dest: "{{ log_dir }}/dist-upgrade_{{ ansible_date_time.iso8601 }}.log"
        changed_when: false

      # Only log when something changed; write on the control node itself.
      when: updated is defined
      connection: local

Sonoff / ITEAD ESP8266

Flash ESPEasy with a FTDI adapter
# Install prerequisites for esptool (Python 2 serial flasher).
sudo apt-get install -y unzip wget python-minimal python-serial
# Fetch the esptool source as a zip of the master branch.
wget -q https://codeload.github.com/espressif/esptool/zip/master -qO /tmp/espressif.zip
unzip /tmp/espressif.zip -d /tmp

# Download the ESPEasy R147 firmware image.
wget http://www.letscontrolit.com/downloads/ESPEasy_R147_RC8.zip -qO /tmp/ESPEasy_R147_RC8.zip
unzip /tmp/ESPEasy_R147_RC8.zip -d /tmp
# Flash the 1MB image over the FTDI adapter on /dev/ttyUSB0 (dio flash mode).
/tmp/esptool-master/esptool.py --port /dev/ttyUSB0 write_flash --flash_mode dio --flash_size 1MB 0x0 /tmp/ESPEasy_R147_1024.bin

# Connect to temporary WiFi access point
SSID: ESP_0
pass: configesp

ZFS filesystem on Linux

boot Ubuntu 16.04 LiveCD
terminal
sudo apt-get install -y ssh
sudo passwd ubuntu
ip a

SSH login to Ubuntu LiveCD
ssh ubuntu@<livecd-ip-address>

sudo su

apt-add-repository universe
apt update

apt install -y debootstrap zfs-initramfs

# remove previous ZFS pool
zpool export rpool

# Disks to wipe and partition for the ZFS root pool.
# DEVICES is deliberately unquoted in the loop so the shell word-splits it
# into one path per line.
DEVICES="
/dev/disk/by-id/ata-TOSHIBA_DT01ACA200_938457298
/dev/disk/by-id/ata-TOSHIBA_DT01ACA200_9D0JN9VR4
"

for DEVICE in ${DEVICES}; do
    # Clear any old ZFS label, write a fresh GPT, and create the BIOS boot
    # partition (type EF02) expected by GRUB on a ZFS root.
    zpool labelclear -f "${DEVICE}"
    parted -s -- "${DEVICE}" mklabel gpt
    sgdisk -a1 -n2:40:2047 -t2:EF02 "${DEVICE}"
done

webix

datatable
# reload from external source
datatable1.clearAll();
datatable1.load(grida.config.url);

Webix Remote
http://docs.webix.com/desktop__webix_remote_php.html - Webix Remote with PHP

# pass parameter to remote function
var result = webix.remote.function1(foo, bar);

# show return value from remote function as webix message
var result = webix.remote.MyClass.select(val1);
result.then((data) => webix.message("msg:" + data));

send data
# post
webix.ajax().post("post.php", {foo:bar});

Webix Jet

DRBD


# cat /etc/drbd.d/global_common.conf
global {
# Opt in to DRBD's anonymous online usage counter.
usage-count yes;
}

common {
startup {
# Do not wait for the peer on boot after a degraded start.
degr-wfc-timeout 0;
}

net {
# Authenticate peers with HMAC-SHA1 using the shared secret below.
cram-hmac-alg sha1;
shared-secret ****************;
}

disk {
# Detach the backing device on lower-level I/O errors instead of passing
# the error up to the file system.
on-io-error detach;
}
}

# cat /etc/drbd.d/r0.res
# Resource r0: one replicated volume between two nodes over the
# 192.168.255.0/24 replication link. The original was missing the closing
# brace of the resource section, which drbdadm rejects.
resource r0 {
    on scld.sedo.de.intern {
        volume 0 {
            device /dev/drbd0;
            disk /dev/vg0/lvol0;
            flexible-meta-disk internal;
        }
        address 192.168.255.1:7788;
    }
    on ubuntu {
        volume 0 {
            device /dev/drbd0;
            disk /dev/sda3;
            flexible-meta-disk internal;
        }
        address 192.168.255.2:7788;
    }
}

Syndicate content