Renew k8s certificates

#Check certificate expiration
kubeadm alpha certs check-expiration

#Export the current kubeadm config
kubeadm config view > /root/kubeadm.yaml
#Renew the certificates
kubeadm alpha certs renew all --config=/root/kubeadm.yaml
kubeadm alpha certs check-expiration

#Clean up
rm -rf /root/kubeadm.yaml

#Restart the control plane containers
docker ps |grep -E 'k8s_kube-apiserver|k8s_kube-controller-manager|k8s_kube-scheduler|k8s_etcd_etcd' | awk -F ' ' '{print $1}' |xargs docker restart
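
To confirm the renewed certificates are actually being served, a quick sanity check (a sketch, assuming the apiserver listens locally on port 6443) is to read the dates off the certificate presented by kube-apiserver:

#Show notBefore/notAfter of the certificate currently served by the apiserver
echo | openssl s_client -connect 127.0.0.1:6443 2>/dev/null | openssl x509 -noout -dates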

[k8s] Create a user and namespace

Create the namespace

kubectl create ns tw-sgis

Create the user (service account)

kubectl -n tw-sgis create sa slanla

RBAC authorization

Create the role

cat <<EOF > slanla-user-role.yml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: tw-sgis
  name: slanla-user-pod
rules:
- apiGroups: ["*"]
  resources: ["pods", "pods/log"]
  verbs: ["get", "watch", "list", "update", "create", "delete"]
EOF
kubectl apply -f slanla-user-role.yml

Bind the role to the service account

kubectl create rolebinding slanla-view-pod \
  --role=slanla-user-pod \
  --serviceaccount=tw-sgis:slanla \
  --namespace=tw-sgis
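
Before generating the kubeconfig, the binding can be sanity-checked by impersonating the service account (a quick sketch; run it from a cluster-admin context that is allowed to impersonate):

#Should answer "yes" for the verbs granted by the Role
kubectl -n tw-sgis auth can-i list pods --as=system:serviceaccount:tw-sgis:slanla
#Should answer "no" for resources the Role does not cover
kubectl -n tw-sgis auth can-i list deployments --as=system:serviceaccount:tw-sgis:slanla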

Generate the kubeconfig

Get the service account's secret name

SECRET=$(kubectl -n tw-sgis get sa slanla -o go-template='{{range .secrets}}{{.name}}{{end}}')

Set the API server address

API_SERVER="https://xxx.xxx.xxx.xxx:6443"

Get the CA certificate

CA_CERT=$(kubectl -n tw-sgis get secret ${SECRET} -o yaml | awk '/ca.crt:/{print $2}')

Create the base kubeconfig

cat <<EOF > slanla.conf
apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority-data: $CA_CERT
    server: $API_SERVER
  name: cluster
EOF

Get the token

TOKEN=$(kubectl -n tw-sgis get secret ${SECRET} -o go-template='{{.data.token}}')

Set the token as the user's credentials

kubectl config set-credentials slanla-user \
  --token=`echo ${TOKEN} | base64 -d` \
  --kubeconfig=slanla.conf

Create context: default

kubectl config set-context default \
  --cluster=cluster \
  --user=slanla-user \
  --kubeconfig=slanla.conf

Use context: default

kubectl config use-context default \
  --kubeconfig=slanla.conf
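
A quick way to confirm the generated kubeconfig works and is scoped as intended (the first command should succeed, the second should be denied by RBAC):

kubectl --kubeconfig=slanla.conf -n tw-sgis get pods
kubectl --kubeconfig=slanla.conf -n kube-system get pods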

Change the docker0 subnet

Here 10.172.254.254 is the gateway; do not change it on a whim to something like 192.168.1.0/24 or 10.172.0.0.

yum install bridge-utils -y

service docker stop
ip link set dev docker0 down
brctl delbr docker0
iptables -t nat -F POSTROUTING


brctl addbr docker0
ip addr add 10.172.254.254/16 dev docker0
ip link set dev docker0 up


cat << EOF > /etc/docker/daemon.json
{
  "bip": "10.172.254.254/16"
}
EOF

systemctl daemon-reload
systemctl restart docker.service
reboot # a reboot is required if k8s is running on this host
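
After docker restarts (or the host reboots), the new bridge address can be verified; a minimal check, assuming the busybox image is available locally or pullable:

#docker0 should now carry 10.172.254.254/16
ip addr show docker0
#A test container should get an address from the new subnet
docker run --rm busybox ip addr show eth0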

refer: https://blog.yowko.com/docker-172-17-ip/

Install ingress-nginx on k8s

The latest ingress-nginx releases no longer include default-http-backend, so install version 0.20.0 instead.

#Install ingress-nginx
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.20.0/deploy/mandatory.yaml
sed -i 's/serviceAccountName: nginx-ingress-serviceaccount/hostNetwork: true\n      serviceAccountName: nginx-ingress-serviceaccount/g' mandatory.yaml
kubectl apply -f mandatory.yaml
rm -f mandatory.yaml*
kubectl get pod --all-namespaces

#Scale ingress-nginx to the number of worker nodes
NODE_COUNT=$(kubectl get nodes | grep -v master | grep -v STATUS | wc -l)
echo $NODE_COUNT
if [ $NODE_COUNT -gt 1 ] ; then
kubectl -n ingress-nginx patch deployment default-http-backend --patch $(echo "{\"spec\":{\"replicas\":$NODE_COUNT}}")
kubectl -n ingress-nginx patch deployment nginx-ingress-controller --patch $(echo "{\"spec\":{\"replicas\":$NODE_COUNT}}")
fi
kubectl get pods -n ingress-nginx -o wide

#Swap in a custom default-http-backend image
DOMAIN=ssl.cbe.tw
kubectl -n ingress-nginx patch deployment default-http-backend --patch "{\"spec\":{\"template\":{\"spec\":{\"containers\":[{\"name\":\"default-http-backend\",\"resources\":{\"limits\":{\"cpu\":\"100m\",\"memory\":\"200Mi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"200Mi\"}},\"image\":\"slanla/apache-defaultbackend\",\"ports\":[{\"containerPort\":8080,\"protocol\":\"TCP\"}],\"env\":[{\"name\":\"LETSENCRYPT_PROXYPASS_URL\",\"value\":\"http://$DOMAIN/.well-known/acme-challenge/ connectiontimeout=15 timeout=30\"},{\"name\":\"LETSENCRYPT_PROXYPASSREVERSE_URL\",\"value\":\"http://$DOMAIN/.well-known/acme-challenge/\"}],\"livenessProbe\":{\"httpGet\":{\"path\":\"/healthz\",\"port\":8080,\"scheme\":\"HTTP\"},\"initialDelaySeconds\":30,\"timeoutSeconds\":5,\"periodSeconds\":10,\"successThreshold\":1,\"failureThreshold\":3}}]}}}}"
kubectl get pods -n ingress-nginx -o wide
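
Because hostNetwork is enabled, the controller binds ports 80/443 directly on each node. A rough smoke test against one node (NODE_IP is a placeholder for a worker node IP; the stock default backend answers unmatched requests with 404, the custom apache image may respond differently):

NODE_IP=xxx.xxx.xxx.xxx
curl -s -o /dev/null -w "%{http_code}\n" http://$NODE_IP/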

Delete all pods on k8s

Delete all pods

kubectl get pods --all-namespaces -o wide | awk '{print $1 " " $2}' | while read AA BB; do kubectl delete pod --grace-period=0 --force -n $AA $BB; done

Delete all non-Running pods

kubectl get pods --all-namespaces -o wide | grep -v Running | awk '{print $1 " " $2}' | while read AA BB; do kubectl delete pod --grace-period=0 --force -n $AA $BB; done
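
On newer kubectl versions the same set of non-Running pods can also be listed with a field selector instead of grep, which is handy for reviewing before the forced delete:

kubectl get pods --all-namespaces --field-selector=status.phase!=Running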

Deploy nfs-client with helm

I used to deploy nfs-client with plain YAML, but the drawback is that whenever the kubernetes or nfs-client version changes, the deployment can break (permission problems and so on).

I just reused the nfs-client YAML from kubernetes 1.10 on a 1.12 cluster and it broke again. It turns out helm provides a chart for deploying nfs-client, so I switched to helm on the spot. The command is as follows:

helm install stable/nfs-client-provisioner \
  --name nfs-client \
  --set nfs.server=xxx.xxx.xxx.xxx \
  --set nfs.path=/path \
  --set storageClass.name=managed-nfs-storage
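
To confirm the provisioner works, check the StorageClass and create a throwaway PVC that should become Bound (a sketch; the name test-claim is arbitrary):

kubectl get storageclass managed-nfs-storage
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: managed-nfs-storage
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc test-claim
kubectl delete pvc test-claim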

Regenerate the k8s join command

Newer k8s releases give the join token an expiry, so some time after installation the old join token no longer works when adding a new node. In that case, use kubeadm token generate to produce a new token and print the join command with the following:

kubeadm token create `kubeadm token generate` --print-join-command --ttl=0
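
If the --discovery-token-ca-cert-hash value is also needed to assemble the join command by hand, it can be recomputed from the cluster CA on the master with the openssl recipe from the kubeadm docs:

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex | sed 's/^.* //'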

Deploy mariaDB on k8s with helm

Environment variables:

PASSWORD="1234567890"
NAME="db"
SERVICE="${NAME}-mariadb"
NAMESPACE="default"

Storage backed by nfs

helm install \
  --namespace ${NAMESPACE} \
  --name ${NAME} stable/mariadb \
  --set slave.replicas=3 \
  --set rootUser.password="${PASSWORD}" \
  --set replication.user=replicator \
  --set replication.password="${PASSWORD}" \
  --set master.persistence.storageClass=managed-nfs-storage \
  --set slave.persistence.storageClass=managed-nfs-storage
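
The release takes a while to come up; a quick way to watch it (assuming the release label applied by the stable/mariadb chart):

kubectl get pods -n ${NAMESPACE} -l release=${NAME} -w
kubectl get pvc -n ${NAMESPACE} -l release=${NAME}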

Parameter reference

Allow root@'%' to log in (note: the password must not be changed casually)

kubectl exec -it -n ${NAMESPACE} ${SERVICE}-master-0 -- mysql -uroot -p${PASSWORD} -e "grant all privileges on *.* to 'root' @'%' identified by '${PASSWORD}'; FLUSH PRIVILEGES;"

The correct way to change the password:

NEW_PASSWORD="0987654321"
PASSWORD=$(kubectl get secret -n ${NAMESPACE} ${SERVICE} -o jsonpath="{.data.mariadb-root-password}" | base64 --decode)
echo "Password: ${PASSWORD}"
helm upgrade ${NAME} stable/mariadb --set rootUser.password=${NEW_PASSWORD}
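
To confirm the new password actually took effect after the upgrade (a quick check, assuming the master pod keeps the chart's ${SERVICE}-master-0 naming):

kubectl exec -it -n ${NAMESPACE} ${SERVICE}-master-0 -- mysql -uroot -p${NEW_PASSWORD} -e "select 1;"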

Install kubernetes 1.8.1 on ubuntu 16.04

Prerequisites
Install kubectl

sudo rm -f ./kubectl /usr/local/bin/kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
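
A quick check that the binary is on the PATH and runnable:

kubectl version --client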

Configure the kubeadm apt repository


sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF'

Install kubeadm

sudo apt-get update && sudo apt-get install -y kubelet kubeadm

Disable swap

sudo swapoff -a 
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
sudo mount -a
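
kubelet refuses to start while swap is enabled, so it is worth confirming swap is really off (swap totals should read 0 and swapon should print nothing):

free -m
swapon --show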

Installation steps:

##############################################################################################
##
## init
##
sudo kubeadm init \
  --kubernetes-version=v1.8.1 \
  --pod-network-cidr=10.244.0.0/16 \
  --skip-preflight-checks

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get pods --all-namespaces -o wide

############################################################################################
##
## Install the pod network (weave)
##
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')

############################################################################################
##
## Join worker nodes
##
sudo kubeadm join --token xxxxxxxxxxxxxxxxxxxx \
  10.1.1.1:6443 \
  --discovery-token-ca-cert-hash sha256:yyyyyyyyyyyyyyyyyyyy
mkdir -p $HOME/.kube
scp 10.1.1.1:~/.kube/config $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes


############################################################################################
##
## Add the dashboard, reference: https://github.com/kubernetes/dashboard/wiki/Access-control
##
cat <<EOF > dashboard-admin.yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
EOF
kubectl apply -f dashboard-admin.yaml
rm dashboard-admin.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

############################################################################################
##
## Start kubectl proxy
##
nohup kubectl proxy --address 0.0.0.0 --accept-hosts '.*' >/dev/null 2>&1 &
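
To confirm the proxy is forwarding requests to the apiserver, hitting the /version endpoint through it should return the cluster version (kubectl proxy defaults to port 8001):

curl -s http://127.0.0.1:8001/version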

############################################################################################
##
## Get the kubernetes-dashboard-admin token for logging in to the dashboard (not needed for now)
##
## kubectl describe -n kube-system secret/$(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin | awk '{print $1}') | grep token: | awk '{print $2}'