重新產生k8s join指令

因為新版的k8s的token已經有時間效期,
所以在安裝完一段時間後, 想要加入新的節點時就會發現舊的join token已經失效而無法使用.
這時候可以用kubeadm token generate重新產生新的token,並用下列語法印出join指令.

# Generate a fresh token and print the complete matching join command;
# --ttl=0 makes the new token non-expiring.
kubeadm token create `kubeadm token generate` --print-join-command --ttl=0

MongoDB Replica Set高可用性的建置

1. 建立三個MongoDB

# Spin up a private docker network plus three mongod containers
# (mongo1..mongo3) for a replica-set test environment.
# Host ports 30001-30003 map to each container's 27017.
echo "[info] 建立測試環境";
docker network create mongo-cluster >/dev/null 2>&1
docker rm -f mongo1 mongo2 mongo3 >/dev/null 2>&1
i=1
while [ "$i" -le 3 ]; do
  echo "       mongo${i} 建立"
  docker run -d \
    -p "3000${i}:27017" \
    --name "mongo${i}" \
    --net mongo-cluster \
    mongo mongod --replSet my-mongo-set >/dev/null 2>&1
  i=$((i + 1))
done

2. 接著設定三者關係,並強制設定electionTimeoutMillis為500ms.

# Initiate the replica set from inside mongo3: declare the three members
# and force electionTimeoutMillis down to 500 ms so failover is fast in tests.
# (The whole config document is passed as one escaped --eval string.)
echo "       設定mongo cluster"
docker exec -it mongo3 mongo --eval "config={\"_id\":\"my-mongo-set\",\"members\":[{\"_id\":0,\"host\":\"mongo1:27017\"},{\"_id\":1,\"host\":\"mongo2:27017\"},{\"_id\":2,\"host\":\"mongo3:27017\"}],\"settings\": { \"electionTimeoutMillis\": 500 }}; rs.initiate(config)"

3. PHP測試

#https://github.com/mongodb/mongo-php-library
require_once __DIR__ . "/vendor/autoload.php";

try{
  // Connect to all three replica-set members; the driver discovers the
  // current primary automatically. The third argument holds driver options:
  // map BSON documents/arrays to plain PHP arrays.
  $client = new MongoDB\Client("mongodb://mongo1:27017,mongo2:27017,mongo3:27017",[],[
    'typeMap' => [
      'array' => 'array',
      'document' => 'array',
      'root' => 'array',
    ],
  ]);
  $db = $client->selectDatabase('test');
  // isMaster reports replica-set topology, including which member is primary.
  $cursor = $db->command(['isMaster' => 1]);
  echo "[info] 成功連線至: {$cursor->toArray()[0]['primary']}\n";

} catch(Exception $e) {
  // Fixed typo (was "無法線連") and surface the driver's failure reason
  // instead of silently discarding it.
  echo "[error] 無法連線: {$e->getMessage()}\n";
  exit;
}

// Create collection "testA" only when it does not exist yet.
$existing = $db->listCollections([
  'filter' => [
    'name' => 'testA'
  ]
]);
if (iterator_count($existing) == 0) {
  $db->createCollection('testA');
}

// Connect to collection testA.
$collection=$db->selectCollection('testA');

// Seed 100 random documents when the collection is empty.
// countDocuments() replaces MongoDB\Collection::count(), which is
// deprecated in the PHP library (1.4+).
if($collection->countDocuments()==0){
  for($i=0;$i<100;$i++){
    $insertOneResult = $collection->insertOne([
      'idx' => $i,
      'value' => "{$i}, rand: ".rand(0,9999),
    ]);
  }
}

// Fetch the newest three documents (highest idx first), hiding _id.
$options = [
  'limit' => 3,
  'projection' => ['_id' => 0],
  'sort' => ['idx' => -1],
];
$cursor = $collection->find([], $options);
foreach ($cursor as $rs) {
  echo "\tidx:{$rs['idx']}\tvalue:{$rs['value']}\n";
}

dd disk image 建立與擴展

測試dd image的建立與擴展

# Create a 100 MB zero-filled image file (10M block x 10)
dd if=/dev/zero of=test001.img bs=10M count=10
# Format it as ext2 (-F forces formatting a regular file, not a block device)
mkfs -F test001.img
# Create the mount point
mkdir -p /tmp/disk/test001
# Loop-mount the image
mount -t ext2 -o loop $PWD/test001.img /tmp/disk/test001
# Drop a ~90 MB file inside to nearly fill the filesystem
dd if=/dev/zero of=/tmp/disk/test001/xxxx.img bs=10M count=9
# Check remaining free space
df -h | grep /dev/loop
# Unmount before resizing
umount /tmp/disk/test001

# Filesystem check (required by resize2fs on an unmounted image)
e2fsck -f test001.img -y
# Grow the filesystem image to 200 MB
resize2fs test001.img 200M
# Re-mount the enlarged image
mount -t ext2 -o loop $PWD/test001.img /tmp/disk/test001
# Tell the loop device to re-read the backing file size, then grow online
losetup -c /dev/loop0
resize2fs /dev/loop0
# Verify the new free space
df -h | grep /dev/loop
# Unmount when done
umount /tmp/disk/test001

# Remove the test image
rm -f -r test001.img

以docker快速建立mysql測試環境

剛剛因為要測試mysql跟mariaDB的語法是否有不同
因此用下列語法快速建立mysql來進行測試

# Start a throwaway MySQL 5.7 server container, then wait for it to come up.
docker run -d \
  --name some-mysql \
  --env MYSQL_ROOT_PASSWORD=abcdefg \
  mysql:5.7
sleep 10

# Attach an interactive mysql client container (auto-removed on exit),
# linked to the server under the alias mysqlA.
docker run --rm -it \
  --link some-mysql:mysqlA \
  mysql sh -c 'exec mysql -h mysqlA -uroot -pabcdefg'

SQL測試語法

-- Minimal spatial-type smoke test (compare MySQL vs MariaDB behavior).
CREATE DATABASE TEST1;
USE TEST1;
CREATE TABLE `test` (
  `id`  int(11) NOT NULL,
  `geo` point   DEFAULT NULL
);
-- Insert one point with SRID 4326 (WGS 84).
INSERT INTO `test` (`id`, `geo`)
VALUES (1, ST_GEOMFROMTEXT('POINT(121 23)', 4326));
SELECT * FROM `test` WHERE 1;

centos 7 筆記

安裝chrome, refer

# Register Google's yum repository, then install Chrome.
# BUG FIX: the original used `bash -c 'cat <<EOF ...'` with an UNQUOTED
# heredoc delimiter, so the inner shell expanded $basearch (undefined ->
# empty string) and wrote a broken baseurl into the repo file. Quoting the
# delimiter (<<'EOF') writes $basearch literally, which is what yum expects.
sudo tee /etc/yum.repos.d/google-chrome.repo >/dev/null <<'EOF'
[google-chrome]
name=google-chrome
baseurl=http://dl.google.com/linux/chrome/rpm/stable/$basearch
enabled=1
gpgcheck=1
gpgkey=https://dl-ssl.google.com/linux/linux_signing_key.pub
EOF
sudo yum -y install google-chrome-stable

安裝qt5, refer

# Build toolchain plus OpenGL development headers required by Qt
sudo yum -y install gcc gcc-c++ mesa-libGL-devel mesa-libGLU-devel freeglut-devel
# Download and run the Qt 5.4.2 offline installer
wget http://download.qt.io/archive/qt/5.4/5.4.2/qt-opensource-linux-x64-5.4.2.run
chmod +x qt-opensource-linux-x64-5.4.2.run
sudo ./qt-opensource-linux-x64-5.4.2.run

安裝proj4, refer

# proj ships in EPEL, so enable that repository first
sudo yum install epel-release -y
sudo yum install -y proj proj-devel proj-epsg

install & test cuda 9.0 on ubuntu 16.04

安裝

# Download NVIDIA's local repository package for CUDA 9.0 / Ubuntu 16.04
wget https://developer.nvidia.com/compute/cuda/9.0/Prod/local_installers/cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
# Stop the display manager before touching the GPU driver
sudo service lightdm stop
sudo dpkg -i cuda-repo-ubuntu1604-9-0-local_9.0.176-1_amd64-deb
sudo apt-key add /var/cuda-repo-9-0-local/7fa2af80.pub
sudo apt-get update && sudo apt-get install cuda -y
sudo reboot
# Make nvcc reachable even without the PATH change below
sudo ln -s /usr/local/cuda/bin/nvcc /usr/bin/nvcc

設定環境變數

# Edit ~/.bashrc and append the export lines below
vim ~/.bashrc
# CUDA runtime libs, CUPTI (profiler), and the NVIDIA driver libs
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:/usr/lib/nvidia-367
export CUDA_HOME=/usr/local/cuda
export PATH=$PATH:/usr/local/cuda/bin

測試編譯
方法1:

# Build and run one of the CUDA samples bundled with the toolkit
cd /usr/local/cuda/samples/0_Simple/vectorAdd
make
./vectorAdd

方法2:
建立測試檔案vectorAdd.cu

#include <stdio.h>
#include <cuda_runtime.h>

// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, numElements).
// Expects a 1-D launch; threads past the end of the data (grid tail) exit
// via the guard clause, so any ceil-div grid size is safe.
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= numElements) return;  // bounds guard for the last partial block
    C[idx] = A[idx] + B[idx];
}

// Host driver: generates random input, runs vectorAdd on the GPU, and
// verifies the result against a CPU-side sum.
int main(void){
  int numElements = 50000;
  size_t size = numElements * sizeof(float);

  // Host buffers filled with random test data in [0, 1].
  float *h_A=new float[numElements];
  float *h_B=new float[numElements];
  float *h_C=new float[numElements];
  for (int i = 0; i < numElements; ++i) {
    h_A[i] = rand()/(float)RAND_MAX;
    h_B[i] = rand()/(float)RAND_MAX;
  }

  // Allocate device buffers and copy the inputs host -> device.
  float *d_A = NULL;  cudaMalloc((void **)&d_A, size);
  float *d_B = NULL;  cudaMalloc((void **)&d_B, size);
  float *d_C = NULL;  cudaMalloc((void **)&d_C, size);
  cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

  // Launch with a ceil-div grid so every element gets a thread.
  int threadsPerBlock = 256;
  int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
  vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
  // Kernel launches are asynchronous and report config errors only via
  // cudaGetLastError(); the original checked nothing.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }

  // Blocking device->host copy; also synchronizes with the kernel.
  cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

  // Release device memory.
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);

  // Verify every element against the CPU reference with a tolerance.
  for (int i = 0; i < numElements; ++i) {
    if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
      fprintf(stderr, "Result verification failed at element %d!\n", i);
      exit(EXIT_FAILURE);
    }
  }

  // BUG FIX: the original did `delete d_A/d_B/d_C`, host-deleting device
  // pointers that had already been cudaFree'd (undefined behavior) while
  // leaking the new[]'d host arrays. Free the host buffers with delete[].
  delete[] h_A;
  delete[] h_B;
  delete[] h_C;

  printf("Test PASSED\n");
  return 0;
}

接著手動編譯~

# Compile vectorAdd.cu to an object file, embedding code for every GPU
# architecture CUDA 9.0 supports (Kepler sm_30 through Volta sm_70, plus
# PTX for compute_70 so newer GPUs can JIT).
/usr/local/cuda-9.0/bin/nvcc \
  -ccbin g++  \
  -m64 \
  -gencode arch=compute_30,code=sm_30 \
  -gencode arch=compute_35,code=sm_35 \
  -gencode arch=compute_37,code=sm_37 \
  -gencode arch=compute_50,code=sm_50 \
  -gencode arch=compute_52,code=sm_52 \
  -gencode arch=compute_60,code=sm_60 \
  -gencode arch=compute_70,code=sm_70 \
  -gencode arch=compute_70,code=compute_70 \
  -c vectorAdd.cu -o vectorAdd.o

# Link the object file into the final executable (same -gencode list).
/usr/local/cuda-9.0/bin/nvcc \
  -ccbin g++ \
  -m64 \
  -gencode arch=compute_30,code=sm_30 \
  -gencode arch=compute_35,code=sm_35 \
  -gencode arch=compute_37,code=sm_37 \
  -gencode arch=compute_50,code=sm_50 \
  -gencode arch=compute_52,code=sm_52 \
  -gencode arch=compute_60,code=sm_60 \
  -gencode arch=compute_70,code=sm_70 \
  -gencode arch=compute_70,code=compute_70 \
  vectorAdd.o -o vectorAdd

ubuntu 16.04 安裝kubernetes 1.8.1

前置工作
安裝kubectl

# Download the latest stable kubectl and install it into /usr/local/bin.
# -f added so rm does not abort the script when no previous copy exists.
sudo rm -f ./kubectl /usr/local/bin/kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl

設定kubeadm來源

# Add Google's apt key and the kubernetes-xenial package source
sudo apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF'

安裝kubeadm

# Install kubelet and kubeadm from the source added above
sudo apt-get update && sudo apt-get install -y kubelet kubeadm

關閉swap

# kubelet refuses to run with swap enabled: turn it off now and comment
# out the swap entry in fstab so it stays off after reboot
sudo swapoff -a 
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
sudo mount -a

安裝步驟:

##############################################################################################
##
## init: bootstrap the control plane.
## NOTE(review): --skip-preflight-checks was deprecated in later kubeadm
## releases (replaced by --ignore-preflight-errors) — confirm for your version.
##
sudo kubeadm init \
  --kubernetes-version=v1.8.1 \
  --pod-network-cidr=10.244.0.0/16 \
  --skip-preflight-checks

# Copy the admin kubeconfig so the current user can run kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

kubectl get pods --all-namespaces -o wide

############################################################################################
##
## Install the pod network (Weave Net), versioned to match the cluster
##
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')

############################################################################################
##
## Join a worker node (token, master IP, and CA hash are placeholders),
## then copy the master's kubeconfig so kubectl works from the node
##
sudo kubeadm join --token xxxxxxxxxxxxxxxxxxxx \
  10.1.1.1:6443 \
  --discovery-token-ca-cert-hash sha256:yyyyyyyyyyyyyyyyyyyy
mkdir -p $HOME/.kube
scp 10.1.1.1:~/.kube/config $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes


############################################################################################
##
## Install the dashboard, grant it cluster-admin via a ClusterRoleBinding.
## Reference: https://github.com/kubernetes/dashboard/wiki/Access-control
## NOTE(review): rbac.authorization.k8s.io/v1beta1 is deprecated in newer
## clusters (use v1) — fine for the 1.8.1 setup documented here.
##
cat <<EOF > dashboard-admin.yaml
{
  "apiVersion": "rbac.authorization.k8s.io/v1beta1", 
  "kind": "ClusterRoleBinding", 
  "metadata": {
    "name": "kubernetes-dashboard",
    "labels": {
      "k8s-app": "kubernetes-dashboard"
    }
  },
  "roleRef": {
    "apiGroup": "rbac.authorization.k8s.io", 
    "kind": "ClusterRole", 
    "name": "cluster-admin"
  }, 
  "subjects": [
    {
      "kind": "ServiceAccount", 
      "name": "kubernetes-dashboard",
      "namespace": "kube-system"
    }
  ]
}
EOF
kubectl apply -f dashboard-admin.yaml 
rm dashboard-admin.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

############################################################################################
##
## Run kubectl proxy in the background.
## WARNING: --address 0.0.0.0 with --accept-hosts '.*' exposes the
## unauthenticated API proxy to the whole network — lab use only.
##
nohup kubectl proxy --address 0.0.0.0 --accept-hosts '.*' >/dev/null 2>&1 &

############################################################################################
##
## Retrieve the kubernetes-dashboard-admin login token (not needed for now)
##
## kubectl describe -n kube-system secret/$(kubectl -n kube-system get secret | grep kubernetes-dashboard-admin | awk {'print $1'}) | grep token: | awk {'print $2'}