Those who think in analog will never understand the advantages of digitalization. - Ruoß, Marc

The following scripts install microk8s from scratch. They are run as the ansible user on the monitoring host; (almost) no login on the cluster nodes is required. The scripts assume a working base system as well as a correct Ansible installation and configuration.
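
For orientation: all scripts below address the five cluster nodes through the Ansible group pc and assume passwordless SSH and sudo for the ansible user on every node. A minimal sketch of what the inventory on the monitoring host could look like (group and host names are taken from the scripts below; everything else is an assumption):

[pc]
pc1
pc2
pc3
pc4
pc5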

#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 07:40:46 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 436 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/gluster_endpoints.sh $
#    $Id: gluster_endpoints.sh 436 2021-05-24 05:40:46Z alfred $
#
# Generate the glusterfs endpoints for microk8s
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
#shopt -o -s xtrace     # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Generate the endpoints

wd=${1}                         # working directory, passed as the first argument
pfport="-server.listen-port="   # prefix in front of the port number in the glusterd status output
sfport=""                       # suffix after the port number (empty: the port is at the end of the line)
pfpc="/usr/sbin/glusterfsd -s " # prefix in front of the server name in the brick process command line
sfpc=" --volfile-id "           # suffix after the server name

# Create the volume info files
ssh pc1 'ls /data/glusterfs/pcvol/brick1/' > ${wd}/volumes.txt
while read -r f; do
  ssh pc1 "sudo gluster volume info ${f}" > ${wd}/${f}.txt &
done < "${wd}/volumes.txt"
wait

# Create the server port files
ssh pc1 "sudo service glusterd status" > ${wd}/pc1-server-listen-port.txt &
ssh pc2 "sudo service glusterd status" > ${wd}/pc2-server-listen-port.txt &
ssh pc3 "sudo service glusterd status" > ${wd}/pc3-server-listen-port.txt &
ssh pc4 "sudo service glusterd status" > ${wd}/pc4-server-listen-port.txt &
ssh pc5 "sudo service glusterd status" > ${wd}/pc5-server-listen-port.txt &
wait

# Create the endpoint files
ssh pc1 'ls /data/glusterfs/pcvol/brick1/' > ${wd}/volumes.txt
while read -r f; do
  echo "Processing ${f} file..."

  fname="${wd}/microk8s_endpoint_${f}.yaml"
  echo "Volume ${fname} file..."
  echo "#" > ${fname}
  while read -r voli; do
    echo "# ${voli} " >> ${fname}
  done < "${wd}/${f}.txt"

  echo "# " >> ${fname}
  echo "---" >> ${fname}
  echo "apiVersion: v1" >> ${fname}
  echo "kind: Endpoints" >> ${fname}
  echo "metadata:" >> ${fname}
  echo "  name: gluster-${f}" >> ${fname}
  echo "  namespace: default" >> ${fname}
  echo "subsets:" >> ${fname}

  while read -r slp; do
    echo "# ${slp} " >> ${fname}
    xport=${slp#*"$pfport"}
    xport=${xport%"$sfport"*}
    xpc=${slp#*"$pfpc"}
    xpc=${xpc%"$sfpc"*}
    echo "- addresses:" >> ${fname}

    while read -r ip; do
        ip=${ip%"$xpc"*}
        echo "  - ip: ${ip}" >> ${fname}
    done < <(grep -i "${xpc}" /etc/hosts)

    echo "  ports:" >> ${fname}
    echo "  - port: ${xport}" >> ${fname}
    echo "    protocol: TCP" >> ${fname}
  done < <(grep -i -h "${f}-server.listen-port=" ${wd}/*server-listen-port.txt)
  echo '############################################################################################' >> ${fname}
  echo '#    $Date: 2021-05-24 07:40:46 +0200 (Mo, 24. Mai 2021) $' >> ${fname}
  echo '#    $Revision: 436 $' >> ${fname}
  echo '#    $Author: alfred $' >> ${fname}
  echo '#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/gluster_endpoints.sh $' >> ${fname}
  echo '#    $Id: gluster_endpoints.sh 436 2021-05-24 05:40:46Z alfred $' >> ${fname}
  echo '#' >> ${fname}
  echo '# This file was generated automatically' >> ${fname}
  echo '#' >> ${fname}
  echo '############################################################################################' >> ${fname}

done < "${wd}/volumes.txt"

exit
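
For illustration, an endpoint file generated by this script for the web volume would look roughly like the sketch below, with one addresses/ports pair per brick server (the comment lines copied from the volume info and status files are omitted; IPs and ports are illustrative, the real values come from /etc/hosts and the glusterd status output):

---
apiVersion: v1
kind: Endpoints
metadata:
  name: gluster-web
  namespace: default
subsets:
- addresses:
  - ip: 192.168.0.201
  ports:
  - port: 49152
    protocol: TCP
- addresses:
  - ip: 192.168.0.202
  ports:
  - port: 49152
    protocol: TCP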


#!/bin/bash
############################################################################################
#    $Date: 2021-05-23 22:13:40 +0200 (So, 23. Mai 2021) $
#    $Revision: 424 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation.sh $
#    $Id: microk8s_Installation.sh 424 2021-05-23 20:13:40Z alfred $
#
# Quick installation of microk8s
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, service accounts installed
#
ansible pc -m shell -a 'sudo apt-get update'
ansible pc -m shell -a 'sudo apt-get upgrade -y'
# Check the files
ansible pc -m shell -a 'df -h'
ansible pc -m shell -a 'sudo cat /boot/firmware/cmdline.txt'
ansible pc -m shell -a 'sudo cat /etc/hosts'
ansible pc -m shell -a 'sudo cat /etc/hostname'
ansible pc -m shell -a 'sudo cat /etc/systemd/timesyncd.conf '
ansible pc -m shell -a 'sudo cat /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg '
ansible pc -m shell -a 'sudo cat /etc/netplan/50-cloud-init.yaml '
# Check the files for consistency
ansible pc -m shell -a 'sudo apt-get -y install open-iscsi'
ansible pc -m shell -a 'sudo systemctl enable iscsid'
ansible pc -m shell -a 'sudo systemctl start iscsid'
ansible pc -m shell -a 'sudo systemctl status iscsid'
ansible pc -m shell -a 'sudo apt-get install -y mc sshfs tree'
ansible pc -m shell -a 'sudo apt-get install bash-completion -y'
# Now for microk8s
#####           ansible pc -m shell -a 'sudo snap remove microk8s'
ansible pc -m shell -a 'sudo snap install microk8s --classic --channel=1.21/stable'
ansible pc -m shell -a 'sudo snap info microk8s | grep -i tracking'
ansible pc -m shell -a 'sudo usermod -a -G microk8s alfred'
ansible pc -m shell -a 'sudo usermod -a -G microk8s ansible'
ansible pc -m shell -a 'sudo shutdown -r now'
#
ansible pc -m shell -a 'microk8s start'
ansible pc -m shell -a 'microk8s status --wait-ready'
ansible pc -m shell -a 'microk8s inspect'
ansible pc -m shell -a 'microk8s enable storage dns rbac ha-cluster'
ansible pc -m shell -a 'microk8s status --wait-ready'
#
# Now the nodes must be joined into a cluster manually
##



#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 06:56:04 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 425 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_manual.sh $
#    $Id: microk8s_Installation_manual.sh 425 2021-05-24 04:56:04Z alfred $
#
# Manual tasks - joining the nodes into a cluster
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# on one node:
microk8s add-node
# join on each of the other nodes
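# For reference, the add-node output contains a join command that is then run
# on each of the remaining nodes; it looks roughly like this (IP and token are
# illustrative placeholders, take the real values from the add-node output):
# microk8s join 192.168.0.201:25000/<token>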

#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 06:56:04 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 425 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil1_bash_aliases.sh $
#    $Id: microk8s_Installation_teil1_bash_aliases.sh 425 2021-05-24 04:56:04Z alfred $
#
# Quick installation of bash aliases for ansible and alfred
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, service accounts installed
#
ansible pc -m shell -a 'sudo microk8s.kubectl completion bash >/etc/bash_completion.d/kubectl'
ansible pc -m shell -a 'cat <<EOF > .bash_aliases
#
#    $Date: 2021-05-24 06:56:04 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 425 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil1_bash_aliases.sh $
#    $Id: microk8s_Installation_teil1_bash_aliases.sh 425 2021-05-24 04:56:04Z alfred $
#
#Shortcut to save typing
alias kubectl='\''microk8s.kubectl'\''
alias k='\''kubectl'\''
alias kall='\''microk8s kubectl get all --all-namespaces && microk8s kubectl get ingresses --all-namespaces && microk8s kubectl get endpoints --all-namespaces'\''
#Shows the logs of the ingress pods
alias klt='\''(kubectl get pods --all-namespaces) | grep -i nginx-ingress-microk8s-controller | while read a b c; do kubectl logs "\$b" -n ingress; done'\''
alias helm='\''microk8s.helm3'\''
EOF
'
ansible pc -m shell -a 'sudo cp -f /home/ansible/.bash_aliases /home/alfred/.bash_aliases '
ansible pc -m shell -a 'sudo chown alfred:alfred /home/alfred/.bash_aliases '
ansible pc -m shell -a 'sudo chmod 664 /home/alfred/.bash_aliases '
#
ansible pc -m shell -a 'cp .bashrc .bashrc.copy'
ansible pc -m lineinfile -a 'dest=~/.bashrc state=present regexp=rpc_json line='\''complete -F __start_kubectl k'\'''
ansible pc -m shell -a 'sudo cp -f /home/ansible/.bashrc /home/alfred/.bashrc '
ansible pc -m shell -a 'sudo chown alfred:alfred /home/alfred/.bashrc '
ansible pc -m shell -a 'sudo chmod 664 /home/alfred/.bashrc '
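# A hedged quick test (sketch, not in the original flow): aliases are only read
# by interactive shells, hence bash -ic
ansible pc -m shell -a 'bash -ic "k get nodes"'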
##
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 06:56:04 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 425 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil2_helm.sh $
#    $Id: microk8s_Installation_teil2_helm.sh 425 2021-05-24 04:56:04Z alfred $
#
# Quick installation of microk8s - helm and arkade - we do not use either
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition

exit
# Prerequisite: cleanly installed nodes, cluster joined
ansible pc -m shell -a 'microk8s status --wait-ready'
#without helm and arkade
ansible pc -m shell -a 'microk8s enable helm3'
ansible pc -m shell -a 'curl -sLS https://dl.get-arkade.dev | sudo sh'
# Structure for arkade, if you want to use it
ansible pc -m shell -a 'mkdir -p .local/bin' # local directory of the ansible user
ansible pc -m shell -a 'ln -s /snap/bin/microk8s.kubectl .local/bin/kubectl'
#
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 11:30:32 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 452 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil3_dashboard.sh $
#    $Id: microk8s_Installation_teil3_dashboard.sh 452 2021-05-24 09:30:32Z alfred $
#
# Quick installation of microk8s - setting up the dashboard
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, cluster joined

ansible pc1 -m shell -a 'microk8s enable dashboard'
ansible pc -m shell -a 'microk8s status --wait-ready'
#
# https://github.com/kubernetes/dashboard/blob/master/docs/user/access-control/creating-sample-user.md
# Install the RBAC objects for the dashboard; otherwise you cannot work with it.
#
app="mikrok8s/install/dashboard"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 452 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=(`date '+%Y-%m-%d'`)
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}

cat <<EOF > ${wd}/sc-admin-user.yaml
#  \$Date: 2021-05-24 11:30:32 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 452 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil3_dashboard.sh $
#  \$Id: microk8s_Installation_teil3_dashboard.sh 452 2021-05-24 09:30:32Z alfred $
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
EOF

cat <<EOF > ${wd}/crb-admin-user.yaml
#  \$Date: 2021-05-24 11:30:32 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 452 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil3_dashboard.sh $
#  \$Id: microk8s_Installation_teil3_dashboard.sh 452 2021-05-24 09:30:32Z alfred $
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF

ansible pc1 -m shell -a 'microk8s kubectl apply -f '${id}'/.'

# This statement only works locally on the node.
# microk8s kubectl -n kube-system get secret $(microk8s kubectl -n kube-system get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
#
##
## Enter this token in the dashboard.
## The default token no longer works once rbac is enabled.
##
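# A hedged alternative to fetch the token from the monitoring host (sketch;
# quoting escaped as in the alias definitions above):
# ssh pc1 'microk8s kubectl -n kube-system describe secret $(microk8s kubectl -n kube-system get secret | grep admin-user | awk '\''{print $1}'\'')'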
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 07:27:43 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 434 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil3_longhorn.sh $
#    $Id: microk8s_Installation_teil3_longhorn.sh 434 2021-05-24 05:27:43Z alfred $
#
# Quick installation of microk8s - we skip Longhorn; the Raspi cluster is not powerful enough for it
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, cluster joined
exit


## Now for Longhorn
ansible pc -m shell -a 'sudo wipefs -a /dev/sda'
ansible pc -m shell -a 'sudo mkfs.ext4 /dev/sda'
ansible pc -m shell -a 'sudo mkdir /var/lib/longhorn' # This is the default
ansible pc -m shell -a 'sudo mount /dev/sda /var/lib/longhorn'
ansible pc -m shell -a 'sudo printf $(sudo blkid -o export /dev/sda | grep UUID)" /var/lib/longhorn       ext4    defaults        0       2" | sudo tee -a /etc/fstab'
ansible pc -m shell -a 'cat /etc/fstab'
ansible pc -m shell -a 'sudo apt-get update'
ansible pc -m shell -a 'sudo apt-get -y install open-iscsi'
ansible pc -m shell -a 'sudo systemctl enable iscsid'
ansible pc -m shell -a 'sudo systemctl start iscsid'
ansible pc -m shell -a 'sudo systemctl status iscsid'
ansible pc1 -m shell -a 'microk8s kubectl create namespace longhorn-system'
# https://longhorn.io/docs/1.1.1/deploy/install/
ansible pc -m shell -a 'sudo apt-get install jq nfs-common -y '
#
# manual steps
#
#curl -sSfL https://raw.githubusercontent.com/longhorn/longhorn/v1.1.1/scripts/environment_check.sh > environment_check.sh
#
#Replace every kubectl in it with microk8s.kubectl
#chmod 755 environment_check.sh
#
#alfred@pc1:~$ ./environment_check.sh
#
#daemonset.apps/longhorn-environment-check created
#waiting for pods to become ready (0/0)
#waiting for pods to become ready (0/5)
#waiting for pods to become ready (1/5)
#all pods ready (5/5)
#
#  MountPropagation is enabled!
#
#cleaning up...
#daemonset.apps "longhorn-environment-check" deleted
#clean up complete
#alfred@pc1:~$
#
#
ansible pc1 -m shell -a 'microk8s kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.1.1/deploy/prerequisite/longhorn-iscsi-installation.yaml'
ansible pc1 -m shell -a 'microk8s kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.1.1/deploy/prerequisite/longhorn-nfs-installation.yaml '
ansible pc1 -m shell -a 'microk8s kubectl apply -f https://raw.githubusercontent.com/longhorn/longhorn/v1.1.1/deploy/longhorn.yaml '
kubectl get pods --namespace longhorn-system --watch
# without helm
#ansible pc -m shell -a 'microk8s helm3 repo add longhorn https://charts.longhorn.io'
#ansible pc -m shell -a 'microk8s helm3 repo update'
#ansible pc1 -m shell -a 'microk8s helm3 install longhorn longhorn/longhorn --namespace longhorn-system --set defaultSettings.defaultDataPath="/var/lib/longhorn" --set csi.kubeletRootDir="/var/snap/microk8s/common/var/lib/kubelet"'
#ansible pc -m shell -a 'microk8s status --wait-ready'
#ansible pc1 -m shell -a 'microk8s kubectl get all -n longhorn-system '
#ansible pc1 -m shell -a 'microk8s kubectl get storageclass '
##
#!/bin/bash
############################################################################################
#    $Date: 2021-05-23 22:13:40 +0200 (So, 23. Mai 2021) $
#    $Revision: 424 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil3_metallb.sh $
#    $Id: microk8s_Installation_teil3_metallb.sh 424 2021-05-23 20:13:40Z alfred $
#
# Quick installation of microk8s - metallb as load balancer
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, cluster joined
ansible pc1 -m shell -a 'microk8s enable metallb:192.168.0.210-192.168.0.244'
ansible pc -m shell -a 'microk8s status --wait-ready'
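# Hedged check (sketch): any Service of type LoadBalancer should now receive an
# external IP from the 192.168.0.210-244 pool:
ansible pc1 -m shell -a 'microk8s kubectl get svc --all-namespaces'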
##
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs.sh $
#    $Id: microk8s_Installation_teil4_glusterfs.sh 453 2021-05-24 09:55:30Z alfred $
#
# Quick installation of microk8s - glusterfs as filesystem with static volumes
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, cluster joined
#
# Definitions for the deployment
#
app="mikrok8s/install/glusterfs"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 453 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=(`date '+%Y-%m-%d'`)
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}

#
# Startup file for modprobe
#
cat <<EOF > ${wd}/loop_gluster.service
#
# Service definition for GlusterFS
#    \$Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    \$Revision: 453 $
#    \$Author: alfred $
#    \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs.sh $
#    \$Id: microk8s_Installation_teil4_glusterfs.sh 453 2021-05-24 09:55:30Z alfred $
#
[Unit]
Description=modprobe for GlusterFS
DefaultDependencies=false
Before=local-fs.target
After=systemd-udev-settle.service
Requires=systemd-udev-settle.service

[Service]
Type=oneshot
ExecStart=/bin/bash -c "modprobe dm_thin_pool && modprobe dm_snapshot && modprobe dm_mirror && modprobe fuse "

[Install]
WantedBy=local-fs.target
EOF
#
ansible pc -m shell -a 'ls -lisa '${id}'/loop_gluster.service'
ansible pc -m shell -a 'sudo cp -f '${id}'/loop_gluster.service /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chown root:root /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chmod 755 /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'ls -lisa /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo systemctl enable /etc/systemd/system/loop_gluster.service'
#

ansible pc -m shell -a 'sudo apt-get update'
ansible pc -m shell -a 'sudo apt-get -y upgrade'
ansible pc -m shell -a 'sudo apt-get install -y software-properties-common'
ansible pc -m shell -a 'sudo apt-get install -y glusterfs-client'
#
ansible pc -m shell -a 'sudo modprobe fuse'
ansible pc -m shell -a 'sudo modprobe dm_thin_pool'
ansible pc -m shell -a 'sudo modprobe dm_snapshot'
ansible pc -m shell -a 'sudo modprobe dm_mirror'
#
#
ansible pc -m shell -a 'sudo apt-get install -y xfsprogs glusterfs-server'
ansible pc -m shell -a 'sudo systemctl start glusterd'
ansible pc -m shell -a 'sudo systemctl enable glusterd'
ansible pc -m shell -a 'sudo systemctl status glusterd'
#
ansible pc1 -m shell -a 'sudo gluster peer probe pc2'
ansible pc1 -m shell -a 'sudo gluster peer probe pc3'
ansible pc1 -m shell -a 'sudo gluster peer probe pc4'
ansible pc1 -m shell -a 'sudo gluster peer probe pc5'
ansible pc1 -m shell -a 'sudo gluster peer status'
#
ansible pc -m shell -a 'sudo wipefs -a /dev/sda'
ansible pc -m shell -a '(
echo g # Create a new empty GPT partition table
echo n # Add a new partition
echo   # Just press enter to accept the default
echo   # Just press enter to accept the default
echo   # Just press enter to accept the default
echo w # Write changes
) | sudo fdisk -w auto /dev/sda'
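# Hedged sanity check (not in the original flow): the new partition should now
# show up as /dev/sda1
ansible pc -m shell -a 'lsblk /dev/sda'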
ansible pc -m shell -a 'sudo mkfs.xfs -f -L pcvol-brick1 /dev/sda1'
ansible pc -m shell -a 'sudo printf $(sudo blkid -o export /dev/sda1|grep PARTUUID)" /data/glusterfs/pcvol/brick1 xfs defaults,noatime 1 2\n" | sudo tee -a /etc/fstab'
ansible pc -m shell -a 'sudo cat /etc/fstab'
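# The appended line should look roughly like this (sketch; the PARTUUID comes from blkid):
# PARTUUID=<uuid> /data/glusterfs/pcvol/brick1 xfs defaults,noatime 1 2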
ansible pc -m shell -a 'sudo mkdir -p /data/glusterfs/pcvol/brick1/'
#
ansible pc -m shell -a 'sudo mount /data/glusterfs/pcvol/brick1'
ansible pc -m shell -a 'df -h | grep -i sda'
#https://docs.gluster.org/en/latest/Quick-Start-Guide/Architecture/
# Dispersed Volume
ansible pc -m shell -a 'sudo mkdir -p /data/glusterfs/pcvol/brick1/web'
ansible pc1 -m shell -a 'sudo gluster volume create web disperse 3 redundancy 1 transport tcp pc1:/data/glusterfs/pcvol/brick1/web pc2:/data/glusterfs/pcvol/brick1/web pc3:/data/glusterfs/pcvol/brick1/web'
ansible pc -m shell -a 'sudo mkdir -p /data/glusterfs/pcvol/brick1/k8s'
ansible pc1 -m shell -a 'sudo gluster volume create k8s disperse 3 redundancy 1 transport tcp pc1:/data/glusterfs/pcvol/brick1/k8s pc3:/data/glusterfs/pcvol/brick1/k8s pc4:/data/glusterfs/pcvol/brick1/k8s'
ansible pc -m shell -a 'sudo mkdir -p /data/glusterfs/pcvol/brick1/db'
ansible pc1 -m shell -a 'sudo gluster volume create db disperse 3 redundancy 1 transport tcp pc1:/data/glusterfs/pcvol/brick1/db pc3:/data/glusterfs/pcvol/brick1/db pc5:/data/glusterfs/pcvol/brick1/db'
ansible pc -m shell -a 'sudo mkdir -p /data/glusterfs/pcvol/brick1/minio'
ansible pc1 -m shell -a 'sudo gluster volume create minio disperse 3 redundancy 1 transport tcp pc2:/data/glusterfs/pcvol/brick1/minio pc4:/data/glusterfs/pcvol/brick1/minio pc5:/data/glusterfs/pcvol/brick1/minio'
ansible pc -m shell -a 'sudo mkdir -p /data/glusterfs/pcvol/brick1/spare'
ansible pc1 -m shell -a 'sudo gluster volume create spare disperse 3 redundancy 1 transport tcp pc1:/data/glusterfs/pcvol/brick1/spare pc2:/data/glusterfs/pcvol/brick1/spare pc5:/data/glusterfs/pcvol/brick1/spare'
# Start the volumes
ansible pc1 -m shell -a 'sudo gluster volume start web'
ansible pc1 -m shell -a 'sudo gluster volume start k8s'
ansible pc1 -m shell -a 'sudo gluster volume start db'
ansible pc1 -m shell -a 'sudo gluster volume start minio'
ansible pc1 -m shell -a 'sudo gluster volume start spare'
# Check the volumes and their distribution
ansible pc1 -m shell -a 'sudo gluster pool list'
ansible pc1 -m shell -a 'sudo gluster volume info'
ansible pc -m shell -a 'sudo tree -a /data/glusterfs'
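# For reference, 'sudo gluster volume info web' should report roughly (sketch,
# the exact values depend on the cluster):
#   Volume Name: web
#   Type: Disperse
#   Status: Started
#   Number of Bricks: 1 x (2 + 1) = 3
#   Transport-type: tcp
#   Bricks:
#   Brick1: pc1:/data/glusterfs/pcvol/brick1/web
#   Brick2: pc2:/data/glusterfs/pcvol/brick1/web
#   Brick3: pc3:/data/glusterfs/pcvol/brick1/web
# i.e. with disperse 3 / redundancy 1 each volume survives the loss of one
# brick, and roughly two thirds of the raw capacity are usable.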

# Generate the endpoints locally, as the ansible user on the monitoring host
./gluster_endpoints.sh ${wd}
#
cat <<EOF > ${wd}/pv-gluster.yaml
#  \$Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 453 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs.sh $
#  \$Id: microk8s_Installation_teil4_glusterfs.sh 453 2021-05-24 09:55:30Z alfred $
# Create the persistent volumes for GlusterFS
#
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-gluster-web
  annotations:
    pv.beta.kubernetes.io/gid: "0"
spec:
  storageClassName: sc-gluster-web
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: gluster-web
    path: /web
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    namespace: default
    name: pvc-gluster-web
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-gluster-k8s
  annotations:
    pv.beta.kubernetes.io/gid: "0"
spec:
  storageClassName: sc-gluster-k8s
  capacity:
    storage: 14Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: gluster-k8s
    path: /k8s
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    namespace: default
    name: pvc-gluster-k8s
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-gluster-db
  annotations:
    pv.beta.kubernetes.io/gid: "0"
spec:
  storageClassName: sc-gluster-db
  capacity:
    storage: 9Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: gluster-db
    path: /db
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    namespace: default
    name: pvc-gluster-db
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-gluster-minio
  annotations:
    pv.beta.kubernetes.io/gid: "0"
spec:
  storageClassName: sc-gluster-minio
  capacity:
    storage: 15Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: gluster-minio
    path: /minio
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    namespace: default
    name: pvc-gluster-minio
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-gluster-spare
  annotations:
    pv.beta.kubernetes.io/gid: "0"
spec:
  storageClassName: sc-gluster-spare
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: gluster-spare
    path: /spare
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
  claimRef:
    namespace: default
    name: pvc-gluster-spare
EOF

cat <<EOF > ${wd}/sc-pvc-gluster.yaml
#  \$Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 453 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs.sh $
#  \$Id: microk8s_Installation_teil4_glusterfs.sh 453 2021-05-24 09:55:30Z alfred $
# Create the persistent volume claims for GlusterFS
#
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-gluster-web
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-web
  namespace: default
spec:
  storageClassName: sc-gluster-web
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 5Gi  
  volumeName: pv-gluster-web
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-gluster-k8s
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-k8s
  namespace: default
spec:
  storageClassName: sc-gluster-k8s
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 14Gi   
  volumeName: pv-gluster-k8s
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-gluster-db
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-db
  namespace: default
spec:
  storageClassName: sc-gluster-db
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 9Gi   
  volumeName: pv-gluster-db
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-gluster-minio
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-minio
  namespace: default
spec:
  storageClassName: sc-gluster-minio
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 15Gi   
  volumeName: pv-gluster-minio
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-gluster-spare
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-spare
  namespace: default
spec:
  storageClassName: sc-gluster-spare
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 1Gi   
  volumeName: pv-gluster-spare
---
EOF

cat <<EOF > ${wd}/busybox-glusterfs.yaml
#  \$Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 453 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs.sh $
#  \$Id: microk8s_Installation_teil4_glusterfs.sh 453 2021-05-24 09:55:30Z alfred $
# Test for all possible mounts :)
#
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: busybox-glusterfs
  namespace: default
  labels:
    app: busybox-glusterfs
spec:
  replicas: 1
  strategy:
    type: RollingUpdate
  selector:
    matchLabels:
      app: busybox-glusterfs
  template:
    metadata:
      labels:
        app: busybox-glusterfs
    spec:
      volumes:
      - name: web
        persistentVolumeClaim:
          claimName: pvc-gluster-web
      - name: k8s
        persistentVolumeClaim:
          claimName: pvc-gluster-k8s
      - name: db
        persistentVolumeClaim:
          claimName: pvc-gluster-db
      - name: minio
        persistentVolumeClaim:
          claimName: pvc-gluster-minio
      - name: spare
        persistentVolumeClaim:
          claimName: pvc-gluster-spare
      - name: sshfs
        hostPath:
         # directory location on host
         path: /opt/cluster/busybox
         # this field is optional
         type: Directory            
      containers:
      - name: busybox-glusterfs
        image: busybox
        command: [ "/bin/sh"]
        args:
          - -c
          - >-
             echo " Etwas Bewegung auf den Disken:) ";
             while true; do
               date > /web/\${HOSTNAME}.txt; export >> /web/\${HOSTNAME}.txt;
               date > /k8s/\${HOSTNAME}.txt; export >> /k8s/\${HOSTNAME}.txt;
               date > /db/\${HOSTNAME}.txt; export >> /db/\${HOSTNAME}.txt;
               date > /minio/\${HOSTNAME}.txt; export >> /minio/\${HOSTNAME}.txt;
               date > /spare/\${HOSTNAME}.txt; export >> /spare/\${HOSTNAME}.txt;
               date > /sshfs/\${HOSTNAME}.txt; export >> /sshfs/\${HOSTNAME}.txt;
               sleep 600;
             done;
             echo "command completed, proceed ....";
        imagePullPolicy: IfNotPresent
        ports:
          - containerPort: 443
          - containerPort: 80
        volumeMounts:
          - mountPath: /web
            name: web
          - mountPath: /k8s
            name: k8s
          - mountPath: /db
            name: db
          - mountPath: /minio
            name: minio
          - mountPath: /spare
            name: spare
          - mountPath: /sshfs
            name: sshfs       
---
apiVersion: v1
kind: Service
metadata:
  name: busybox-glusterfs
  namespace: default
  labels:
    name: busybox-glusterfs
spec:
  ports:
    - port: 80
      targetPort: 80
      protocol: TCP
      name: http
    - port: 443
      targetPort: 443
      protocol: TCP
      name: https
  selector:
    app: busybox-glusterfs
EOF
#
ansible pc1 -m shell -a 'microk8s kubectl apply -f '${id}'/.'
#
ansible pc1 -m shell -a 'microk8s kubectl get endpoints --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get sc --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get pv --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get all -n default'
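# If the static binding worked, each claim should report Bound, roughly (sketch):
# NAME              STATUS   VOLUME           CAPACITY   ACCESS MODES   STORAGECLASS
# pvc-gluster-web   Bound    pv-gluster-web   5Gi        RWX            sc-gluster-web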
#
# Now we have cluster storage with statically mounted disks
#
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_heketi.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_heketi.sh 453 2021-05-24 09:55:30Z alfred $
#
# Quick installation of microk8s - heketi as management layer - not done
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition
# Prerequisite: cleanly installed nodes, cluster joined

exit

#
# Definitions for the deployment
#
app="mikrok8s/install/glusterfs-heketi"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 453 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=(`date '+%Y-%m-%d'`)
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}

ansible pc -m shell -a 'sudo apt-get update'
ansible pc -m shell -a 'sudo apt-get -y upgrade'
ansible pc -m shell -a 'sudo apt-get install -y software-properties-common'
#ansible pc -m shell -a 'sudo add-apt-repository -y ppa:gluster/glusterfs-7'
#ansible pc -m shell -a 'sudo rm /etc/apt/sources.list.d/gluster-ubuntu-glusterfs-7-focal.list'
#ansible pc -m shell -a 'sudo apt install -y glusterfs-fuse'
#
ansible pc -m shell -a 'sudo modprobe fuse'
ansible pc -m shell -a 'sudo modprobe dm_thin_pool'
ansible pc -m shell -a 'sudo modprobe dm_snapshot'
ansible pc -m shell -a 'sudo modprobe dm_mirror'
#
ansible pc -m shell -a 'sudo apt-get install -y glusterfs-client'
#ansible pc -m shell -a 'sudo apt-get install -y glusterfs-fuse' # Red Hat only
ansible pc -m shell -a 'sudo apt-get install -y xfsprogs glusterfs-server'
ansible pc -m shell -a 'sudo systemctl start glusterd'
ansible pc -m shell -a 'sudo systemctl enable glusterd'
ansible pc -m shell -a 'sudo systemctl status glusterd'
#
ansible pc1 -m shell -a 'sudo gluster peer probe pc2'
ansible pc1 -m shell -a 'sudo gluster peer probe pc3'
ansible pc1 -m shell -a 'sudo gluster peer probe pc4'
ansible pc1 -m shell -a 'sudo gluster peer probe pc5'
ansible pc1 -m shell -a 'sudo gluster peer status'
#
# Startup file for modprobe
#
ansible pc -m shell -a 'cat <<EOF > ./loop_gluster.service
#
# Service definition for heketi
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_heketi.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_heketi.sh 453 2021-05-24 09:55:30Z alfred $
#worker1-3# vi /etc/systemd/system/loop_gluster.service
#
[Unit]
Description=modprobe for GlusterFS, For heketi /dev/sda1 is used
DefaultDependencies=false
Before=local-fs.target
After=systemd-udev-settle.service
Requires=systemd-udev-settle.service

[Service]
Type=oneshot
ExecStart=/bin/bash -c "modprobe dm_thin_pool && modprobe dm_snapshot && modprobe dm_mirror && modprobe fuse "

[Install]
WantedBy=local-fs.target
EOF
'
ansible pc -m shell -a 'ls -lisa ./loop_gluster.service'
ansible pc -m shell -a 'sudo mv -f ./loop_gluster.service /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chown root:root /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chmod 755 /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'ls -lisa /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo systemctl enable /etc/systemd/system/loop_gluster.service'
#
ansible pc -m shell -a 'sudo wipefs -a /dev/sda'
ansible pc -m shell -a '(
echo g # Create a new empty GPT partition table
echo n # Add a new partition
echo   # Just press enter to accept the default
echo   # Just press enter to accept the default
echo   # Just press enter to accept the default
echo w # Write changes
) | sudo fdisk -w auto /dev/sda'
ansible pc -m shell -a 'sudo glusterfs --version' # check the version

#
# Now comes the manual part
#
ansible pc1 -m shell -a 'rm -R -f ./heketi'
ansible pc1 -m shell -a 'git clone https://github.com/heketi/heketi'
ansible pc1 -m shell -a 'microk8s kubectl delete -f ./heketi/extras/kubernetes/glusterfs-daemonset.json '
ansible pc1 -m shell -a 'microk8s kubectl apply -f ./heketi/extras/kubernetes/glusterfs-daemonset.json '
# Label the nodes that have glusterfs
ansible pc1 -m shell -a 'microk8s kubectl label node pc1 storagenode=glusterfs'
ansible pc1 -m shell -a 'microk8s kubectl label node pc2 storagenode=glusterfs'
ansible pc1 -m shell -a 'microk8s kubectl label node pc3 storagenode=glusterfs'
ansible pc1 -m shell -a 'microk8s kubectl label node pc4 storagenode=glusterfs'
ansible pc1 -m shell -a 'microk8s kubectl label node pc5 storagenode=glusterfs'
ansible pc1 -m shell -a 'microk8s kubectl get pod'
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
#
# Now the busybox can be deployed
#
cat <<EOF > ${wd}/busybox-glusterfs-heketi.yaml
#  \$Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 453 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_heketi.sh $
#  \$Id: microk8s_Installation_teil4_glusterfs_heketi.sh 453 2021-05-24 09:55:30Z alfred $
# Test for all possible mounts :)
#
---
apiVersion: apps/v1
kind: Deployment
metadata:
 name: busybox-glusterfs-heketi
 namespace: default
 labels:
   app: busybox-glusterfs-heketi
spec:
 replicas: 1
 strategy:
   type: RollingUpdate
 selector:
   matchLabels:
     app: busybox-glusterfs-heketi
 template:
   metadata:
     labels:
       app: busybox-glusterfs-heketi
   spec:
     volumes:
     - name: web
       persistentVolumeClaim:
         claimName: pvc-gluster-web
     - name: sshfs
       hostPath:
         # directory location on host
         path: /opt/cluster/busybox
         # this field is optional
         type: Directory
     containers:
     - name: busybox-glusterfs-heketi
       image: busybox
       command: [ "/bin/sh"]
       args:
          - -c
          - >-
             echo " Etwas Bewegung auf den Disken:) ";
             while true; do
               date > /web/heketi-\${HOSTNAME}.txt; export >> /web/heketi-\${HOSTNAME}.txt;
               date > /sshfs/heketi-\${HOSTNAME}.txt; export >> /sshfs/heketi-\${HOSTNAME}.txt;
               sleep 600;
             done;
             echo "command completed, proceed ....";
       imagePullPolicy: IfNotPresent
       ports:
        - containerPort: 443
        - containerPort: 80
       volumeMounts:
        - mountPath: /web
          name: web
        - mountPath: /sshfs
          name: sshfs       
---
apiVersion: v1
kind: Service
metadata:
 name: busybox-glusterfs-heketi
 namespace: default
 labels:
   name: busybox-glusterfs-heketi
spec:
 ports:
  - port: 80
    targetPort: 80
    protocol: TCP
    name: http
  - port: 443
    targetPort: 443
    protocol: TCP
    name: https
 selector:
     app: busybox-glusterfs-heketi
EOF
#
ansible pc1 -m shell -a 'microk8s kubectl apply -f '${id}'/.'
#
ansible pc1 -m shell -a 'microk8s kubectl get endpoints --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get sc --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get pv --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get all -n default'
#
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_heketi_standalone.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_heketi_standalone.sh 453 2021-05-24 09:55:30Z alfred $
#
# Quick installation of microk8s - standalone heketi server - not done
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition

exit

# Prerequisite: cleanly installed nodes, cluster joined
#
#
# Definitions for the deployment
#
app="mikrok8s/install/glusterfs"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 453 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=(`date '+%Y-%m-%d'`)
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}
#
ansible pc -m shell -a 'sudo apt-get update'
ansible pc -m shell -a 'sudo apt-get -y upgrade'
#
ansible pc -m shell -a 'sudo modprobe fuse'
ansible pc -m shell -a 'sudo modprobe dm_thin_pool'
ansible pc -m shell -a 'sudo modprobe dm_snapshot'
ansible pc -m shell -a 'sudo modprobe dm_mirror'
#
ansible pc -m shell -a 'sudo apt-get install -y xfsprogs glusterfs-server glusterfs-client lvm2 thin-provisioning-tools'
ansible pc -m shell -a 'sudo systemctl start glusterd'
ansible pc -m shell -a 'sudo systemctl enable glusterd'
ansible pc -m shell -a 'sudo systemctl status glusterd'
#
ansible pc1 -m shell -a 'sudo gluster peer probe pc2'
ansible pc1 -m shell -a 'sudo gluster peer probe pc3'
ansible pc1 -m shell -a 'sudo gluster peer probe pc4'
ansible pc1 -m shell -a 'sudo gluster peer probe pc5'
ansible pc1 -m shell -a 'sudo gluster peer status'
#
# Startup file for modprobe
#
ansible pc -m shell -a 'cat <<EOF > ./loop_gluster.service
#
# Service definition for heketi
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_heketi_standalone.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_heketi_standalone.sh 453 2021-05-24 09:55:30Z alfred $
#worker1-3# vi /etc/systemd/system/loop_gluster.service
#
[Unit]
Description=modprobe for GlusterFS, For heketi /dev/sda1 is used
DefaultDependencies=false
Before=local-fs.target
After=systemd-udev-settle.service
Requires=systemd-udev-settle.service

[Service]
Type=oneshot
ExecStart=/bin/bash -c "modprobe dm_thin_pool && modprobe dm_snapshot && modprobe dm_mirror && modprobe fuse "

[Install]
WantedBy=local-fs.target
EOF
'
ansible pc -m shell -a 'ls -lisa ./loop_gluster.service'
ansible pc -m shell -a 'sudo mv -f ./loop_gluster.service /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chown root:root /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chmod 755 /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'ls -lisa /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo systemctl enable /etc/systemd/system/loop_gluster.service'
#
# stop and delete all volumes left over from earlier experiments
#sudo gluster volume stop db
#sudo gluster volume delete db
#db
#k8s
#minio
#spare
#web
#
#ansible pc -m shell -a 'sudo ls /data/glusterfs/pcvol/brick1/*'
#ansible pc -m shell -a 'sudo rm -f -R /data/glusterfs/pcvol/brick1/*'
#ansible pc -m shell -a 'sudo rm -f -R /data/glusterfs/pcvol/brick1/*'
#ansible pc -m shell -a 'sudo umount /data/glusterfs/pcvol/brick1'
#
# remove the mount point from /etc/fstab
#
ansible pc -m shell -a 'sudo wipefs -a /dev/sda'
ansible pc1 -m shell -a 'wget https://github.com/heketi/heketi/releases/download/v10.3.0/heketi-v10.3.0.linux.arm64.tar.gz'
ansible pc1 -m shell -a 'sudo mkdir -p /etc/heketi'
ansible pc1 -m shell -a 'sudo tar xzvf heketi-v10.3.0.linux.arm64.tar.gz -C /etc/heketi'
ansible pc1 -m shell -a 'rm -f heketi-v10.3.0.linux.arm64.tar.gz'
ansible pc1 -m shell -a 'sudo ln /etc/heketi/heketi/heketi-cli /usr/bin/heketi-cli'
ansible pc1 -m shell -a 'sudo ln /etc/heketi/heketi/heketi /usr/bin/heketi'
#
# Generate and distribute the keys
#
ansible pc -m shell -a 'sudo adduser heketi --gecos "First Last,RoomNumber,WorkPhone,HomePhone" --disabled-password'
ansible pc -m shell -a 'echo "heketi:bvxLnKi6PhyIoHdaTCqR" | sudo chpasswd'
ansible pc -m shell -a 'sudo usermod -aG sudo heketi'
#
# Log in manually and distribute from pc1 to all other nodes
#ssh-keygen
#ssh-copy-id -i
#
ansible pc1 -m shell -a 'cat <<EOF > ./heketi.service
#
# Service definition for heketi
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_heketi_standalone.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_heketi_standalone.sh 453 2021-05-24 09:55:30Z alfred $
#pc1# vi /etc/systemd/system/heketi.service
#
[Unit]
Description=Heketi Server

[Service]
Type=simple
WorkingDirectory=/var/lib/heketi
EnvironmentFile=-/etc/heketi/heketi.env
User=heketi
ExecStart=sudo /usr/bin/heketi --config=/etc/heketi/heketi/heketi.json
Restart=on-failure
StandardOutput=syslog
StandardError=syslog

[Install]
WantedBy=multi-user.target
EOF
'
ansible pc1 -m shell -a 'ls -lisa ./heketi.service'
ansible pc1 -m shell -a 'sudo mv -f ./heketi.service /etc/systemd/system/heketi.service'
ansible pc1 -m shell -a 'sudo chown root:root /etc/systemd/system/heketi.service'
ansible pc1 -m shell -a 'sudo chmod 755 /etc/systemd/system/heketi.service'
ansible pc1 -m shell -a 'ls -lisa /etc/systemd/system/heketi.service'
#
# Manual step: adjust /etc/heketi/heketi.json; the relevant section:
    "executor": "ssh",

    "_sshexec_comment": "SSH username and private key file information",
    "sshexec": {
      "keyfile": "/home/heketi/.ssh/id_rsa",
      "user": "heketi",
      "sudo": true,
      "port": "22",
      "fstab": "/etc/fstab",
      "backup_lvm_metadata": false
    },
#

ansible pc1 -m shell -a 'sudo chown -R heketi:heketi /var/lib/heketi'
ansible pc1 -m shell -a 'sudo chown -R heketi:heketi /etc/heketi'
ansible pc1 -m shell -a 'sudo systemctl daemon-reload'
ansible pc1 -m shell -a 'sudo systemctl enable /etc/systemd/system/heketi.service'
ansible pc1 -m shell -a 'sudo systemctl start heketi'
ansible pc1 -m shell -a 'sudo systemctl status heketi'


# then create heketi_topology.json and copy it to /etc/heketi/heketi
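# A hedged sketch of what heketi_topology.json could look like here; the layout
# follows the heketi sample topology, hostnames and device match the output
# below, and the zone value is an assumption:
# {
#   "clusters": [ {
#     "nodes": [
#       { "node": { "hostnames": { "manage":  ["192.168.0.201"],
#                                  "storage": ["192.168.0.201"] },
#                   "zone": 1 },
#         "devices": [ "/dev/sda" ] },
#       ...analogous entries for 192.168.0.202 through 192.168.0.205...
#     ]
#   } ]
# }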

heketi-cli topology load --json=heketi_topology.json --user=admin --secret bvxLnKi6PhyIoHdaTCqR
Creating cluster ... ID: 48e64cfd65e93b4e55b117e19ceea171
    Allowing file volumes on cluster.
    Allowing block volumes on cluster.
    Creating node 192.168.0.201 ... ID: 38b8822507b61118219bb24117df9468
        Adding device /dev/sda ... OK
    Creating node 192.168.0.202 ... ID: f0d16fcd7e98a982572d1aba73d332d0
        Adding device /dev/sda ... OK
    Creating node 192.168.0.203 ... ID: f144f48e5f0013573c8c606890b14d91
        Adding device /dev/sda ... OK
    Creating node 192.168.0.204 ... ID: 7f719c43250d2341890eae10046af1fb
        Adding device /dev/sda ... OK
    Creating node 192.168.0.205 ... ID: af1739b78d6244a4dcbc6fd2c6d99550
        Adding device /dev/sda ... OK

# Check
root@pc1:/etc/heketi/heketi# heketi-cli cluster list --user=admin --secret bvxLnKi6PhyIoHdaTCqR
Clusters:
Id:48e64cfd65e93b4e55b117e19ceea171 [file][block]
root@pc1:/etc/heketi/heketi# heketi-cli cluster info 48e64cfd65e93b4e55b117e19ceea171 --user=admin --secret bvxLnKi6PhyIoHdaTCqR
Cluster id: 48e64cfd65e93b4e55b117e19ceea171
Nodes:
38b8822507b61118219bb24117df9468
7f719c43250d2341890eae10046af1fb
af1739b78d6244a4dcbc6fd2c6d99550
f0d16fcd7e98a982572d1aba73d332d0
f144f48e5f0013573c8c606890b14d91
Volumes:

Block: true

File: true


# Create the secret for the storage class
ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: heketi-secret
type: Opaque
stringData:
  config.yaml: |
    apiUrl: "https://192.168.0.201:8080"
    username: admin
    password: bvxLnKi6PhyIoHdaTCqR
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl delete sc glusterfs'
ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
#
# Storage class for our filesystem
#
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/glusterfs
reclaimPolicy: Delete
volumeBindingMode: Immediate
parameters:
  resturl: "https://192.168.0.201:8080"
  restuser: "admin"
  secretName: "heketi-secret"
  secretNamespace: "default"
  volumetype: replicate:3
  volumenameprefix: "icp"
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl patch storageclass microk8s-hostpath -p '\''{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'\'
ansible pc1 -m shell -a 'microk8s kubectl patch storageclass glusterfs -p '\''{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'\'
ansible pc1 -m shell -a 'microk8s kubectl get sc'
#


ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
#
# Create a persistent volume claim for GlusterFS
#
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-gluster-web
  namespace: default
spec:
  storageClassName: glusterfs
  accessModes:
  - ReadWriteMany      
  resources:
     requests:
       storage: 5Gi
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
#
# Now the busybox can be deployed
#
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_kadalu.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_kadalu.sh 453 2021-05-24 09:55:30Z alfred $
#
# Quick installation of microk8s - kadalu - not done
#
############################################################################################
#shopt -o -s errexit    # Terminates the shell script if a command returns an error code.
shopt -o -s xtrace      # Displays each command before it's executed.
shopt -o -s nounset     # No variables without definition

exit


# Prerequisite: cleanly installed nodes, cluster joined
#
#
# Definitions for the deployment
#
app="mikrok8s/install/glusterfs"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 453 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=(`date '+%Y-%m-%d'`)
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}


ansible pc -m shell -a 'sudo apt-get update'
ansible pc -m shell -a 'sudo apt-get -y upgrade'
#
ansible pc -m shell -a 'sudo modprobe fuse'
ansible pc -m shell -a 'sudo modprobe dm_thin_pool'
ansible pc -m shell -a 'sudo modprobe dm_snapshot'
ansible pc -m shell -a 'sudo modprobe dm_mirror'
#
#Implementation
#• There is no need to have a glusterd running on the host. ???????????????????????
ansible pc -m shell -a 'sudo apt-get install -y xfsprogs glusterfs-server glusterfs-client lvm2 thin-provisioning-tools'
ansible pc -m shell -a 'sudo systemctl start glusterd'
ansible pc -m shell -a 'sudo systemctl enable glusterd'
ansible pc -m shell -a 'sudo systemctl status glusterd'
#
# Startup file for modprobe
#
ansible pc -m shell -a 'cat <<EOF > ./loop_gluster.service
#
# Service definition for kadalu
#    $Date: 2021-05-24 11:55:30 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 453 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil4_glusterfs_kadalu.sh $
#    $Id: microk8s_Installation_teil4_glusterfs_kadalu.sh 453 2021-05-24 09:55:30Z alfred $
#worker1-3# vi /etc/systemd/system/loop_gluster.service
#
[Unit]
Description=modprobe for GlusterFS, For heketi /dev/sda1 is used
DefaultDependencies=false
Before=local-fs.target
After=systemd-udev-settle.service
Requires=systemd-udev-settle.service

[Service]
Type=oneshot
ExecStart=/bin/bash -c "modprobe dm_thin_pool && modprobe dm_snapshot && modprobe dm_mirror && modprobe fuse "

[Install]
WantedBy=local-fs.target
EOF
'
ansible pc -m shell -a 'ls -lisa ./loop_gluster.service'
ansible pc -m shell -a 'sudo mv -f ./loop_gluster.service /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chown root:root /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo chmod 755 /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'ls -lisa /etc/systemd/system/loop_gluster.service'
ansible pc -m shell -a 'sudo systemctl enable /etc/systemd/system/loop_gluster.service'
# This is how the disk must look
ansible pc -m shell -a 'sudo dd if=/dev/zero of=/dev/sda bs=512 count=1'
ansible pc -m shell -a 'sudo wipefs -a -t dos -f /dev/sda'
ansible pc -m shell -a 'lsblk'
#ansible pc -m shell -a 'sudo wipefs -a /dev/sda'
#ansible pc -m shell -a '(
#echo g # Create a new empty GPT partition table
#echo n # Add a new partition
#echo   # Just press enter to accept the default
#echo   # Just press enter to accept the default
#echo   # Just press enter to accept the default
#echo w # Write changes
#) | sudo fdisk -w auto /dev/sda'
##ansible pc -m shell -a 'sudo mkfs.xfs /dev/sda'
#ansible pc -m shell -a 'sudo mkfs.xfs -f -L pcvol /dev/sda1'
#ansible pc -m shell -a 'sudo printf $(sudo blkid -o export /dev/sda1|grep PARTUUID)" /mnt/data xfs defaults,noatime 1 2\n" | sudo tee -a /etc/fstab'
#ansible pc -m shell -a 'sudo cat /etc/fstab'
#ansible pc -m shell -a 'sudo mkdir -p /mnt/data'
#ansible pc -m shell -a 'sudo mount /mnt/data'
#
#Download the latest release of Kadalu Kubectl plugin using,
ansible pc -m shell -a 'curl -LO https://github.com/kadalu/kadalu/releases/download/0.8.2/kubectl-kadalu'
#Make the kubectl binary executable.
ansible pc -m shell -a 'chmod +x ./kubectl-kadalu'
#Move the binary in to your PATH.
ansible pc -m shell -a 'sudo mv ./kubectl-kadalu /usr/local/bin/kubectl-kadalu'
#Test to ensure the version you installed is up-to-date
ansible pc -m shell -a 'kubectl-kadalu version'
ansible pc -m shell -a 'microk8s kubectl kadalu version'
#Deploy KaDalu Operator using,
#ansible pc1 -m shell -a 'sudo ln -s /snap/bin/microk8s.kubectl /usr/local/bin/kubectl'
#ansible pc1 -m shell -a 'microk8s kubectl kadalu install'
# kadalu has a typo: -nkadalu
#
ansible pc1 -m shell -a 'rm -f -R ./kadalu'
ansible pc1 -m shell -a 'rm -f ./kadalu-operator-microk8s.yaml'
ansible pc1 -m shell -a 'git clone https://github.com/kadalu/kadalu'
ansible pc1 -m shell -a 'wget https://raw.githubusercontent.com/kadalu/kadalu/arm/manifests/kadalu-operator-microk8s.yaml'
#ansible pc1 -m shell -a 'wget https://raw.githubusercontent.com/kadalu/kadalu/arm/manifests/kadalu-operator.yaml'
#https://github.com/kadalu/kadalu/blob/devel/manifests/kadalu-operator-microk8s.yaml
#https://raw.githubusercontent.com/kadalu/kadalu/arm/manifests/kadalu-operator-microk8s.yaml
ansible pc1 -m shell -a 'microk8s kubectl apply -f ./kadalu-operator-microk8s.yaml'
#
# File: sample-storage-config.yaml
ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
# File: sample-storage-config.yaml
---
apiVersion: kadalu-operator.storage/v1alpha1
kind: KadaluStorage
metadata:
# This will be used as name of PV Hosting Volume
  name: storage-pool-1
spec:
  type: Replica1
  storage:
    - node: pc1
      device: /dev/sda
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl get all -n kadalu'
ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
# File: sample-pvc.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pv1
spec:
  storageClassName: kadalu.replica1
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get pv --all-namespaces '

(kubectl get pods --all-namespaces) | grep -i kadalu-csi-nodeplugin | while read a b c; do kubectl logs "$b" -n kadalu; done


# File: sample-storage-config.yaml

#
ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
# File: sample-storage-config.yaml
---
apiVersion: kadalu-operator.storage/v1alpha1
kind: KadaluStorage
metadata:
# This will be used as name of PV Hosting Volume
  name: storage-pool-1
spec:
  type: Replica3
  storage:
    - node: pc1
      device: /dev/sda
    - node: pc2
      device: /dev/sda
    - node: pc3
      device: /dev/sda
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
#
# Now the busybox can be deployed
#
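# A minimal sketch of such a busybox pod: it mounts the PVC "pv1" created above
# and keeps running so one can exec into it and write a test file (pod name
# and image tag are illustrative):
ansible pc1 -m shell -a 'cat <<EOF | microk8s kubectl apply -f -
---
apiVersion: v1
kind: Pod
metadata:
  name: busybox-pvc-test
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.33
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - mountPath: /mnt/pv1
      name: testvol
  volumes:
  - name: testvol
    persistentVolumeClaim:
      claimName: pv1
EOF
'
ansible pc1 -m shell -a 'microk8s kubectl exec busybox-pvc-test -- sh -c "echo hello > /mnt/pv1/test.txt && cat /mnt/pv1/test.txt"'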
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 11:57:32 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 454 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil5_Registry.sh $
#    $Id: microk8s_Installation_teil5_Registry.sh 454 2021-05-24 09:57:32Z alfred $
#
# Quick microk8s installation - setting up the registry
#
############################################################################################
#shopt -o -s errexit    #—Terminates  the shell script  if a command returns an error code.
shopt -o -s xtrace #—Displays each command before it’s executed.
shopt -o -s nounset #-No Variables without definition
# Prerequisite: cleanly installed nodes, cluster joined
#
# Definitions for the deployment
#
app="mikrok8s/install/registry"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 454 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=$(date '+%Y-%m-%d')
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}

#
# This StorageClass is actually meant to move the registry's storage to a different location
#
cat <<EOF > ${wd}/sc-registry.yaml
#
# Create the StorageClass and the PVC for the registry
#
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-registry
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"    
provisioner: microk8s.io/hostpath
volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-registry
  labels:
    type: local
spec:
  storageClassName: sc-registry
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/opt/cluster/registry"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-registry
spec:
  storageClassName: sc-registry
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi
EOF
#
#ansible pc1 -m shell -a 'microk8s kubectl apply -f '${id}'/.'
#
#ansible pc1 -m shell -a 'microk8s kubectl get storageclass '
#ansible pc1 -m shell -a 'microk8s kubectl patch storageclass microk8s-hostpath -p '\''{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'\'
#ansible pc1 -m shell -a 'microk8s kubectl get storageclass '
#
# But the registry addon ignores this and always uses the microk8s-hostpath StorageClass
#
ansible pc1 -m shell -a 'microk8s enable registry:size=20Gi'
#
# This way the registry is not cluster-capable and is only reachable via node pc1. We skip ln -s tricks here.
#
ansible pc -m shell -a 'microk8s status --wait-ready'
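# To see where the addon actually put its storage, inspect the PVC it creates;
# the names ("container-registry"/"registry-claim") are what current microk8s
# releases use and may differ:
ansible pc1 -m shell -a 'microk8s kubectl get pvc -n container-registry'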
##
## The registry is now configured
##
#!/bin/bash
############################################################################################
#    $Date: 2021-05-23 22:13:40 +0200 (So, 23. Mai 2021) $
#    $Revision: 424 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil6.sh $
#    $Id: microk8s_Installation_teil6.sh 424 2021-05-23 20:13:40Z alfred $
#
# Quick microk8s installation - installing useful add-ons
#
############################################################################################
#shopt -o -s errexit    #—Terminates  the shell script  if a command returns an error code.
shopt -o -s xtrace #—Displays each command before it’s executed.
shopt -o -s nounset #-No Variables without definition
# Prerequisite: cleanly installed nodes, cluster joined
ansible pc1 -m shell -a 'microk8s enable metrics-server'
ansible pc -m shell -a 'microk8s status --wait-ready'
ansible pc1 -m shell -a 'microk8s enable ingress'
ansible pc -m shell -a 'microk8s status --wait-ready'
ansible pc1 -m shell -a 'microk8s enable prometheus'
ansible pc -m shell -a 'microk8s status --wait-ready'
ansible pc1 -m shell -a 'microk8s kubectl get pvc --all-namespaces '
ansible pc1 -m shell -a 'microk8s kubectl get pv --all-namespaces '
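# The prometheus addon of this vintage deploys kube-prometheus into the
# "monitoring" namespace; a hedged way to check it and to reach the UI
# (the service name is an assumption):
ansible pc1 -m shell -a 'microk8s kubectl get pods -n monitoring'
#microk8s kubectl port-forward -n monitoring service/prometheus-k8s 9090:9090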
##
## The cluster is now online and available
##
#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 12:06:14 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 455 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil7_certmanager.sh $
#    $Id: microk8s_Installation_teil7_certmanager.sh 455 2021-05-24 10:06:14Z alfred $
#
# cert-manager
#
############################################################################################
#shopt -o -s errexit    #—Terminates  the shell script  if a command returns an error code.
shopt -o -s xtrace #—Displays each command before it’s executed.
shopt -o -s nounset #-No Variables without definition
# Prerequisite: the scripts were run in the correct order
#
# Definitions for the deployment
#
app="mikrok8s/install/certmanager"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 455 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=$(date '+%Y-%m-%d')
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}

wget https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.yaml --output-document=${wd}/cert-manager.yaml
wget https://github.com/jetstack/cert-manager/releases/download/v1.3.1/cert-manager.crds.yaml --output-document=${wd}/cert-manager.crds.yaml

ansible pc1 -m shell -a 'microk8s kubectl create namespace cert-manager'
ansible pc1 -m shell -a 'microk8s kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true'
ansible pc1 -m shell -a 'microk8s kubectl apply -f '${id}'/.'
ansible pc1 -m shell -a 'microk8s kubectl get pods --namespace cert-manager'
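# cert-manager is only usable once its webhook answers; a sketch of an
# explicit wait (deployment names as shipped in the v1.3.1 manifest):
ansible pc1 -m shell -a 'microk8s kubectl -n cert-manager rollout status deployment/cert-manager-webhook --timeout=300s'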

#
# Installation without Helm: see https://cert-manager.io/docs/installation/kubernetes/; the Helm-based alternative would be:
#microk8s helm3 repo add jetstack https://charts.jetstack.io
#microk8s helm3 repo update
#microk8s helm3 install cert-manager jetstack/cert-manager \
#  --namespace cert-manager \
#  --set installCRDs=true \
#  --set ingressShim.defaultIssuerName=letsencrypt-production \
#  --set ingressShim.defaultIssuerKind=ClusterIssuer \
#  --set ingressShim.defaultIssuerGroup=cert-manager.io
#
# Now the nodes have to be joined into a cluster manually (sketch below)
# microk8s add-node
##
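# The flow: "microk8s add-node" on pc1 prints a join command with a one-time
# token, which is then run on every remaining node, e.g. (illustrative only):
#   pc1$ microk8s add-node
#   pcN$ microk8s join <ip-of-pc1>:25000/<token>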

#!/bin/bash
############################################################################################
#    $Date: 2021-05-24 12:49:38 +0200 (Mo, 24. Mai 2021) $
#    $Revision: 461 $
#    $Author: alfred $
#    $HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil8_local.sh $
#    $Id: microk8s_Installation_teil8_local.sh 461 2021-05-24 10:49:38Z alfred $
#
# Applying the local configurations
#
############################################################################################
#shopt -o -s errexit    #—Terminates  the shell script  if a command returns an error code.
shopt -o -s xtrace #—Displays each command before it’s executed.
shopt -o -s nounset #-No Variables without definition
#
# Definitions for the deployment
#
app="mikrok8s/install/local"
pf=\$"Revision: "
sf=" "\$
fr="\$Revision: 461 $"
revision=${fr#*"$pf"}
revision=${revision%"$sf"*}
xd=$(date '+%Y-%m-%d')
wd="${HOME}/copy/${app}/${xd}/r${revision}"
id="/opt/cluster/${app}/${xd}/r${revision}"
rm -f -R ${wd}
mkdir -p ${wd}

cat <<EOF > ${wd}/namespace-slainte.yaml
#  \$Date: 2021-05-24 12:49:38 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 461 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil8_local.sh $
#  \$Id: microk8s_Installation_teil8_local.sh 461 2021-05-24 10:49:38Z alfred $
# Namespace for production
#
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: slainte
  name: slainte
spec:
  finalizers:
  - kubernetes
EOF

cat <<EOF > ${wd}/ingress-service.yaml
#  \$Date: 2021-05-24 12:49:38 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 461 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil8_local.sh $
#  \$Id: microk8s_Installation_teil8_local.sh 461 2021-05-24 10:49:38Z alfred $
# Definition of the ingress service
#
apiVersion: v1
kind: Service
metadata:
  name: ingress
  namespace: ingress
spec:
  selector:
    name: nginx-ingress-microk8s
  type: LoadBalancer
  # loadBalancerIP is optional. MetalLB will automatically allocate an IP from its pool if not
  # specified. You can also specify one manually.
  loadBalancerIP: 192.168.0.210
  # The external ingress controller for the router is always at .210,
  # so this ingress controller can also be added to various /etc/hosts files, e.g.
  # 192.168.0.210    k8s.slainte.at
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 80
    - name: https
      protocol: TCP
      port: 443
      targetPort: 443
EOF
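# After the apply at the end of this script, MetalLB should hand the service
# exactly the requested IP; quick check (EXTERNAL-IP must show 192.168.0.210,
# not <pending>):
#ansible pc1 -m shell -a 'microk8s kubectl get service ingress -n ingress'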
      
cat <<EOF > ${wd}/certificate-slainte.yaml
#  \$Date: 2021-05-24 12:49:38 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 461 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil8_local.sh $
#  \$Id: microk8s_Installation_teil8_local.sh 461 2021-05-24 10:49:38Z alfred $
# Definition of the issuer and the certificate
#
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  name: letsencrypt-prod
  namespace: default
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory
#Staging    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # Email address used for ACME registration
    email: admin@example.com  # placeholder: the real address was hidden by the CMS spam protection
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-prod
    # Enable the HTTP-01 challenge provider
    solvers:
    - http01:
        ingress:
          class: public
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: k8s-slainte-at
  namespace: default
spec:
  secretName: k8s-slainte-at
  issuerRef:
    kind: Issuer
    name: letsencrypt-prod
  commonName: k8s.slainte.at
  dnsNames:
  - k8s.slainte.at      
EOF
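# Issuance can be followed once the manifests are applied; cert-manager records
# its progress in Certificate/Order/Challenge resources (a sketch):
#ansible pc1 -m shell -a 'microk8s kubectl describe certificate k8s-slainte-at -n default'
#ansible pc1 -m shell -a 'microk8s kubectl get certificaterequest,order,challenge -n default'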

cat <<EOF > ${wd}/webserver-slainte.yaml
#  \$Date: 2021-05-24 12:49:38 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 461 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil8_local.sh $
#  \$Id: microk8s_Installation_teil8_local.sh 461 2021-05-24 10:49:38Z alfred $
# Definition of the webserver for Slainte
#
apiVersion: v1
kind: Service
metadata:
  name: webserver-slainte
  namespace: default
  labels:
    run: webserver-slainte
spec:
  type: ClusterIP
  ports:
  - port: 443
    targetPort: 443
    protocol: TCP
    name: https
  selector:
    run: webserver-slainte
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: webserver-slainte
  namespace: default
spec:
  selector:
    matchLabels:
      run: webserver-slainte
  replicas: 1
  template:
    metadata:
      labels:
        run: webserver-slainte
    spec:
      volumes:
      - name: nginx-volume
        hostPath:
          # directory location on host
          path: /opt/cluster/nginx
          # this field is optional
          type: Directory
      - name: html-volume
        hostPath:
          # directory location on host
          path: /opt/cluster/html
          # this field is optional
          type: Directory
      - name: secret-volume
        secret:
          secretName: k8s-slainte-at
      containers:
      - name: webserver-slainte
        # latest can lead to surprises
        image: nginx:1.20
        ports:
        - containerPort: 443
        volumeMounts:
        - mountPath: /etc/nginx
          name: nginx-volume
        - mountPath: /etc/nginx/ssl
          name: secret-volume
        - mountPath: /usr/share/nginx/html
          name: html-volume
EOF
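# The deployment above expects a complete nginx configuration under
# /opt/cluster/nginx on the node. A minimal sketch of such an nginx.conf,
# assuming the mounted cert-manager secret provides tls.crt/tls.key under
# /etc/nginx/ssl (to be placed on the node, not run here):
#cat <<'NGINX' > /opt/cluster/nginx/nginx.conf
#events {}
#http {
#  server {
#    listen 443 ssl;
#    server_name k8s.slainte.at;
#    ssl_certificate     /etc/nginx/ssl/tls.crt;
#    ssl_certificate_key /etc/nginx/ssl/tls.key;
#    root /usr/share/nginx/html;
#  }
#}
#NGINX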

cat <<EOF > ${wd}/ingress-slainte.yaml
#  \$Date: 2021-05-24 12:49:38 +0200 (Mo, 24. Mai 2021) $
#  \$Revision: 461 $
#  \$Author: alfred $
#  \$HeadURL: https://monitoring.slainte.at/svn/slainte/trunk/k8s/microk8s/microk8s_Installation_teil8_local.sh $
#  \$Id: microk8s_Installation_teil8_local.sh 461 2021-05-24 10:49:38Z alfred $
# Definition of the ingress for Slainte
#
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: k8s.slainte.at-ingress
  namespace: default
  annotations:
  # Check the ingress class with: kubectl -n ingress describe daemonset.apps/nginx-ingress-microk8s-controller
    kubernetes.io/ingress.class: public
    nginx.ingress.kubernetes.io/rewrite-target: /      
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/secure-backends: "true"    
    nginx.ingress.kubernetes.io/configuration-snippet: |-
      proxy_ssl_server_name on;
      proxy_ssl_name \$host;
    cert-manager.io/issuer: letsencrypt-prod
spec:
  rules:
  - host: k8s.slainte.at
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: webserver-slainte
            port:
              number: 443
  tls:
  - hosts:
      - k8s.slainte.at
    secretName: k8s-slainte-at
#    issuerRef:
#      name: letsencrypt-prod
#      kind: Issuer
EOF
#
ansible pc1 -m shell -a 'microk8s kubectl apply -f '${id}'/.'
#
# Adapting the services
#ansible pc1 -m shell -a 'microk8s kubectl patch service kubernetes-dashboard -n kube-system -p '\''{"spec": {"type": "LoadBalancer"}}'\'
ansible pc1 -m shell -a 'microk8s kubectl get services --all-namespaces'
#ansible pc1 -m shell -a 'microk8s kubectl patch service longhorn-frontend -n longhorn-system -p '\''{"spec": {"type": "LoadBalancer"}}'\'
##
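#
# Final end-to-end check from the monitoring host, assuming k8s.slainte.at
# resolves to the MetalLB IP (via DNS or an /etc/hosts entry):
#curl -kI https://k8s.slainte.at/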