Remove dangling Docker images

When building images with docker build, a lot of dangling (<none>) images tend to pile up.

They can be removed with the following command:

docker rmi $(docker images | grep none | awk '{print $3}')
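
On Docker 1.13 and newer, the built-in prune subcommand removes the same dangling images and can be used instead:

docker image prune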

CentOS 7 notes

Install Chrome (refer)

sudo bash -c 'cat <<EOF > /etc/yum.repos.d/google-chrome.repo
[google-chrome]
name=google-chrome
baseurl=http://dl.google.com/linux/chrome/rpm/stable/\$basearch
enabled=1
gpgcheck=1
gpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub
EOF
'
sudo yum -y install google-chrome-stable
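
To confirm the install, google-chrome-stable (the binary the package provides) can report its version:

google-chrome-stable --version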

Install Qt5 (refer)

sudo yum -y install gcc gcc-c++ mesa-libGL-devel mesa-libGLU-devel freeglut-devel
wget http://download.qt.io/archive/qt/5.4/5.4.2/qt-opensource-linux-x64-5.4.2.run
chmod +x qt-opensource-linux-x64-5.4.2.run
sudo ./qt-opensource-linux-x64-5.4.2.run
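
As a hypothetical check, assuming the installer was left at its default root-install prefix, qmake should now report its version (adjust the path to wherever Qt was actually installed):

/opt/Qt5.4.2/5.4/gcc_64/bin/qmake -v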

Install proj4 (refer)

sudo yum install epel-release -y
sudo yum install -y proj proj-devel proj-epsg
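
A quick sanity check using the proj command the package installs: convert a WGS84 lon/lat pair (121, 25) to UTM zone 51.

echo "121 25" | proj +proj=utm +zone=51 +ellps=WGS84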

Migrate GitLab

My self-hosted GitLab ran on a Synology NAS, but its performance was too poor,
so I decided to move GitLab onto a VM.

I had previously tried mounting the GitLab data folder from the Synology SMB share inside the VM,
but that always ran into permission problems.

So the current plan is to keep the GitLab data folder on the VM's local disk
and protect the data with periodic backups.
Below is my migration log.

Backup

docker exec -it gitlab gitlab-rake gitlab:backup:create
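
By default the backup tarball is written to /var/opt/gitlab/backups inside the container. If that path is not bind-mounted to the host, it can be copied out with docker cp (the filename below is the one reused later in this note):

docker cp gitlab:/var/opt/gitlab/backups/1510018625_2017_11_07_10.1.0_gitlab_backup.tar .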

Restore: preparation
Create the directories

mkdir -p $PWD/files/data/backups
mkdir -p $PWD/files/conf
mkdir -p $PWD/files/log/

Copy the backup data

cp $OLD_GITLAB_PATH/backups/1510018625_2017_11_07_10.1.0_gitlab_backup.tar $PWD/files/data/backups

Run the new GitLab

docker run -d \
  --name=gitlab \
  --publish 443:443 \
  --publish 80:80 \
  --restart always \
  -v $PWD/files/conf:/etc/gitlab \
  -v $PWD/files/log:/var/log/gitlab \
  -v $PWD/files/data:/var/opt/gitlab \
  gitlab/gitlab-ce:10.1.0-ce.0
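
To watch the startup progress, tail the container logs:

docker logs -f gitlab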

Wait two or three minutes and confirm the GitLab web UI is reachable,
then start the restore.

Restore
Enter the container

docker exec -it gitlab bash

Stop the services

gitlab-ctl stop unicorn
gitlab-ctl stop sidekiq

Set permissions

chmod 777 /var/opt/gitlab/backups/1510018625_2017_11_07_10.1.0_gitlab_backup.tar

Restore

gitlab-rake gitlab:backup:restore BACKUP=1510018625_2017_11_07_10.1.0

Restart the services

gitlab-ctl restart

Check

gitlab-rake gitlab:check SANITIZE=true

Install & test CUDA 9.0 on Ubuntu 16.04

Installation

wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
sudo service lightdm stop
sudo dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
sudo apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
sudo apt-get update && sudo apt-get install cuda -y
sudo reboot
sudo ln -s /usr/local/cuda/bin/nvcc /usr/bin/nvcc

Set environment variables

vim ~/.bashrc
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/lib/nvidia-367
export CUDA_HOME=/usr/local/cuda
export PATH=$PATH:/usr/local/cuda/bin
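
After reloading the shell configuration, a couple of simple checks confirm the compiler and driver are visible:

source ~/.bashrc
nvcc --version
nvidia-smi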

Test compilation
Method 1:

cd /usr/local/cuda/samples/0_Simple/vectorAdd
make
./vectorAdd

Method 2:
Create the test file vectorAdd.cu

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>

__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements){
        C[i] = A[i] + B[i];
    }
}

int main(void){
  int numElements = 50000;

  // Initialize the host test data
  float *h_A=new float[numElements];
  float *h_B=new float[numElements];
  float *h_C=new float[numElements];
  for (int i = 0; i < numElements; ++i) {
    h_A[i] = rand()/(float)RAND_MAX;
    h_B[i] = rand()/(float)RAND_MAX;
  }

  // Allocate GPU memory and copy the input data from host to device
  size_t size = numElements * sizeof(float);
  float *d_A = NULL;  cudaMalloc((void **)&d_A, size);
  float *d_B = NULL;  cudaMalloc((void **)&d_B, size);
  float *d_C = NULL;  cudaMalloc((void **)&d_C, size);
  cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

  // Run the computation on the GPU
  int threadsPerBlock = 256;
  int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; 
  vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);

  // Copy the result back to the host
  cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
  
  // Free the GPU memory
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);

  // Verify the result
  for (int i = 0; i < numElements; ++i) {
    if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
      fprintf(stderr, "Result verification failed at element %d!\n", i);
      exit(EXIT_FAILURE);
    }
  }

  // Free the host memory
  delete[] h_A;
  delete[] h_B;
  delete[] h_C;

  printf("Test PASSED\n");
  return 0;
}

Then compile it manually:

/usr/local/cuda-9.0/bin/nvcc \
  -ccbin g++  \
  -m64 \
  -gencode arch=compute_30,code=sm_30 \
  -gencode arch=compute_35,code=sm_35 \
  -gencode arch=compute_37,code=sm_37 \
  -gencode arch=compute_50,code=sm_50 \
  -gencode arch=compute_52,code=sm_52 \
  -gencode arch=compute_60,code=sm_60 \
  -gencode arch=compute_70,code=sm_70 \
  -gencode arch=compute_70,code=compute_70 \
  -c vectorAdd.cu -o vectorAdd.o

/usr/local/cuda-9.0/bin/nvcc \
  -ccbin g++ \
  -m64 \
  -gencode arch=compute_30,code=sm_30 \
  -gencode arch=compute_35,code=sm_35 \
  -gencode arch=compute_37,code=sm_37 \
  -gencode arch=compute_50,code=sm_50 \
  -gencode arch=compute_52,code=sm_52 \
  -gencode arch=compute_60,code=sm_60 \
  -gencode arch=compute_70,code=sm_70 \
  -gencode arch=compute_70,code=compute_70 \
  vectorAdd.o -o vectorAdd
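
Then run the resulting binary; if everything worked it prints Test PASSED:

./vectorAdd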

Install Kubernetes 1.8.1 on Ubuntu 16.04

Preparation
Install kubectl

sudo rm -f ./kubectl /usr/local/bin/kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
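
Verify the binary is on the PATH:

kubectl version --client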

Set up the kubeadm package source

sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF'

Install kubeadm

sudo apt-get update && sudo apt-get install -y kubelet kubeadm

Disable swap

sudo swapoff -a 
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
sudo mount -a
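
Confirm swap is now disabled (the Swap line should show 0):

free -h | grep -i swap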

Installation steps:

##############################################################################################
##
## init
##
sudo kubeadm init \
  --kubernetes-version=v1.8.1 \
  --pod-network-cidr=10.244.0.0/16 \
  --skip-preflight-checks

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get pods --all-namespaces -o wide

############################################################################################
##
## Install the pod network
##
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')

############################################################################################
##
## Join a node
##
sudo kubeadm join --token xxxxxxxxxxxxxxxxxxxx \
  10.1.1.1:6443 \
  --discovery-token-ca-cert-hash sha256:yyyyyyyyyyyyyyyyyyyy
mkdir -p $HOME/.kube
scp 10.1.1.1:~/.kube/config $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes


############################################################################################
##
## Add the dashboard, reference: https://github.com/kubernetes/dashboard/wiki/Access-control
##
cat <<EOF > dashboard-admin.yaml
{
  "apiVersion": "rbac.authorization.k8s.io/v1beta1", 
  "kind": "ClusterRoleBinding", 
  "metadata": {
    "name": "kubernetes-dashboard",
    "labels": {
      "k8s-app": "kubernetes-dashboard"
    }
  },
  "roleRef": {
    "apiGroup": "rbac.authorization.k8s.io", 
    "kind": "ClusterRole", 
    "name": "cluster-admin"
  }, 
  "subjects": [
    {
      "kind": "ServiceAccount", 
      "name": "kubernetes-dashboard",
      "namespace": "kube-system"
    }
  ]
}
EOF
kubectl apply -f dashboard-admin.yaml 
rm dashboard-admin.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

############################################################################################
##
## Add a proxy
##
nohup kubectl proxy --address 0.0.0.0 --accept-hosts '.*' >/dev/null 2>&1 &
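
## With the proxy listening on 0.0.0.0, the dashboard should be reachable from a browser at a URL of the
## form below (assumption: the kubernetes-dashboard service from the recommended manifest above, running
## in the kube-system namespace; replace <proxy-host> with the machine running kubectl proxy):
## http://<proxy-host>:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/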

############################################################################################
##
## Get the kubernetes-dashboard-admin token for logging in to the dashboard (not needed for now)
##
## kubectl describe -n kube-system secret/$(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin | awk {'print $1'}) | grep token: | awk {'print $2'}

Fix the Ubuntu warning [W: mdadm: /etc/mdadm/mdadm.conf defines no arrays]

On Ubuntu 16.04 LTS, every kernel update prints the warning
W: mdadm: /etc/mdadm/mdadm.conf defines no arrays

Solution:
Delete the mdadm.conf file

sudo rm /etc/mdadm/mdadm.conf

Then regenerate mdadm.conf with the update-initramfs command:

sudo update-initramfs -u

In short, the fix is this one-liner:

sudo rm /etc/mdadm/mdadm.conf && sudo update-initramfs -u