# Synology NAS: let a non-root user run docker.
# (The original note repeated this whole section twice verbatim; deduplicated.)
# Switch to root first.
sudo -i
# Add the user to the "docker" group and hand the docker socket to that group.
synogroup --add docker <your_username>
chown root:docker /var/run/docker.sock
# AdGuard Home: recreate the container with persistent work/conf directories.
# Quote "$PWD" so the setup also works from a path containing spaces.
mkdir -p "$PWD/adguardhome/workdir"
mkdir -p "$PWD/adguardhome/confdir"
# Remove any previous instance first (errors if none exists are harmless).
docker rm -f adguardhome
docker run \
  -d \
  --name=adguardhome \
  --restart=always \
  -v "$PWD/adguardhome/workdir":/opt/adguardhome/work \
  -v "$PWD/adguardhome/confdir":/opt/adguardhome/conf \
  -p 53:53/tcp \
  -p 53:53/udp \
  -p 67:67/udp \
  -p 68:68/tcp \
  -p 68:68/udp \
  -p 80:80/tcp \
  -p 443:443/tcp \
  -p 853:853/tcp \
  -p 3000:3000/tcp \
  adguard/adguardhome
# GitLab: reset all CI runner registration tokens.
# The Ruby and SQL statements below run INSIDE the respective consoles —
# they were pasted here as bare shell lines (with the psql prompt) and
# would fail if this file were executed; they are now comments.

# Option 1 — Rails console:
gitlab-rails console
# (inside the console)
#   Ci::Runner.all.update_all(token_encrypted: nil)
# then restart GitLab.

# Option 2 — database console:
gitlab-rails dbconsole
# (inside psql — do not type the "gitlabhq_production=>" prompt)
#   UPDATE projects SET runners_token = null, runners_token_encrypted = null;
#   UPDATE namespaces SET runners_token = null, runners_token_encrypted = null;
#   UPDATE application_settings SET runners_registration_token_encrypted = null;
# then restart GitLab.
# Check certificate expiration.
kubeadm alpha certs check-expiration
# Export the current kubeadm config.
kubeadm config view > /root/kubeadm.yaml
# Renew all certificates from the exported config.
kubeadm alpha certs renew all --config=/root/kubeadm.yaml
kubeadm alpha certs check-expiration
# Clean up the exported config (single file — plain -f is enough).
rm -f /root/kubeadm.yaml
# Restart the control-plane containers so they pick up the new certs.
docker ps | grep -E 'k8s_kube-apiserver|k8s_kube-controller-manager|k8s_kube-scheduler|k8s_etcd_etcd' | awk '{print $1}' | xargs docker restart
# Create a restricted service account "slanla" and build a kubeconfig for it.
# NOTE(review): the namespace created here is "slanla" but every command
# below targets "tw-sgis" — confirm which namespace is actually intended.
kubectl create ns slanla
kubectl -n tw-sgis create sa slanla
# Role limited to pod + pod-log operations (the pasted YAML had lost its
# indentation, which is significant in YAML — restored here).
cat <<EOF > slanla-user-role.yml
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: tw-sgis
  name: slanla-user-pod
rules:
- apiGroups: ["*"]
  resources: ["pods", "pods/log"]
  verbs: ["get", "watch", "list", "update", "create", "delete"]
EOF
kubectl apply -f slanla-user-role.yml
kubectl create rolebinding slanla-view-pod \
  --role=slanla-user-pod \
  --serviceaccount=tw-sgis:slanla \
  --namespace=tw-sgis
# Collect the service-account secret, API endpoint and CA certificate.
SECRET=$(kubectl -n tw-sgis get sa slanla -o go-template='{{range .secrets}}{{.name}}{{end}}')
API_SERVER="https://xxx.xxx.xxx.xxx:6443"
CA_CERT=$(kubectl -n tw-sgis get secret "${SECRET}" -o yaml | awk '/ca.crt:/{print $2}')
# Write the cluster half of the kubeconfig (heredoc expands the two vars).
cat <<EOF > slanla.conf
apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority-data: $CA_CERT
    server: $API_SERVER
  name: cluster
EOF
# The token is stored base64-encoded in the secret; decode it for the kubeconfig.
TOKEN=$(kubectl -n tw-sgis get secret "${SECRET}" -o go-template='{{.data.token}}')
kubectl config set-credentials slanla-user \
  --token="$(echo "${TOKEN}" | base64 -d)" \
  --kubeconfig=slanla.conf
kubectl config set-context default \
  --cluster=cluster \
  --user=slanla-user \
  --kubeconfig=slanla.conf
kubectl config use-context default \
  --kubeconfig=slanla.conf
# Raspberry Pi: docker-ce 18.09 fails to start on this board,
# so remove it and install 18.06 instead.
sudo apt-mark unhold docker-ce
sudo apt-get purge -y docker-ce
sudo apt-get autoremove -y --purge docker-ce
sudo apt-get autoclean
# WARNING: this wipes all local images, containers and volumes.
sudo rm -rf /var/lib/docker
export VERSION=18.06 && curl -sSL get.docker.com | sh
# Pin the package so apt upgrades don't bump it back to 18.09.
sudo apt-mark hold docker-ce
在*.vmx中加入下列參數.
vhv.enable = "TRUE"
請注意不要加入下列參數,會無法開機!
hypervisor.cpuid.v0 = "FALSE"
server: 1.2.3.4
port: 514/udp
# logspout: forward every container's stdout/stderr to a remote syslog
# endpoint over UDP. Remove any stale instance before recreating it.
docker rm -f logspout
docker run -d \
  --name=logspout \
  --restart=always \
  -e RAW_FORMAT="{{.Data}}" \
  -p 10080:80 \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /etc/localtime:/etc/localtime \
  gliderlabs/logspout \
  syslog+udp://1.2.3.4:514
# Tail logspout's own output to confirm logs are being shipped.
docker logs -f logspout
# Re-IP the docker0 bridge. 10.172.254.254 here is the gateway address —
# do NOT substitute 192.168.1.0/24 or 10.172.0.0 on your own initiative.
yum install bridge-utils -y
service docker stop
ip link set dev docker0 down
brctl delbr docker0
# Flush NAT rules that referenced the old bridge subnet.
iptables -t nat -F POSTROUTING
brctl addbr docker0
ip addr add 10.172.254.254/16 dev docker0
ip link set dev docker0 up
# Persist the bridge IP so dockerd recreates it with this address on start.
cat << EOF > /etc/docker/daemon.json
{
  "bip": "10.172.254.254/16"
}
EOF
systemctl daemon-reload
systemctl restart docker.service
reboot # a reboot is required if kubernetes runs on this host
# Install docker-ce (pinned to 18.06.0.ce)
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce-18.06.0.ce -y
systemctl enable docker && systemctl start docker
# Remove legacy nvidia-docker 1.x: first force-remove any container still
# using its volumes, then uninstall the package
docker volume ls -q -f driver=nvidia-docker | xargs -r -I{} -n1 docker ps -q -a -f volume={} | xargs -r docker rm -f
sudo yum remove nvidia-docker
# Add the nvidia-docker yum repository for this distro/release
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.repo | sudo tee /etc/yum.repos.d/nvidia-docker.repo
# Install nvidia-docker2 and SIGHUP dockerd so it reloads its config
sudo yum install -y nvidia-docker2
sudo pkill -SIGHUP dockerd
# Smoke test: run nvidia-smi inside a CUDA container via the nvidia runtime
docker run --runtime=nvidia --rm nvidia/cuda:9.0-base nvidia-smi