Compiling and Installing Kubernetes 1.15.3

Published: 2020-09-08 07:11:05 · Author: juestnow · Category: System Operations

Environment:

Operating system: Ubuntu 18.04.3 LTS on Windows 10 WSL (GNU/Linux 4.4.0-18362-Microsoft x86_64)
Go version: go1.12.9 linux/amd64
GCC version: gcc 7.4.0 (Ubuntu 7.4.0-1ubuntu1~18.04.1)
make version: GNU Make 4.1
Deployment servers: etcd 192.168.30.31, kubernetes 192.168.30.32
Target OS: CentOS 7.6
Application deployment directory: /apps/<application>
Kubernetes service CIDR: 10.66.0.0/16
Pod CIDR: 10.67.0.0/16
Networking: kube-router provides the cluster network

Deploying the Go environment

cd /usr/local/src
wget https://dl.google.com/go/go1.12.9.linux-amd64.tar.gz
tar -xvf go1.12.9.linux-amd64.tar.gz
mv go ../
vi /etc/profile
export GOPATH=/mnt/e/work/go
export GOBIN=/mnt/e/work/go/bin
PATH=$PATH:/usr/local/go/bin:$HOME/bin:$GOBIN
export PATH
source /etc/profile
go version 
root@Qist:~# go version
go version go1.12.9 linux/amd64
# Create the Go workspace and bin directories
mkdir -p /mnt/e/work/go/{bin,src,pkg}

Installing the build dependencies for Kubernetes 1.15.3

apt -y install make gcc

Compiling Kubernetes 1.15.3

wget https://github.com/kubernetes/kubernetes/archive/v1.15.3.tar.gz
tar -xvf v1.15.3.tar.gz
cd kubernetes-1.15.3/
make
cd ./_output/local/bin/linux/amd64
mkdir -p /mnt/e/work/k8s/bin
cp -pdr kube* /mnt/e/work/k8s/bin/
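As a quick sanity check (optional, not part of the original flow), the freshly built binaries should report the version that was just compiled:

/mnt/e/work/k8s/bin/kube-apiserver --version
# Expected output: Kubernetes v1.15.3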

Building the certificate generation tools

go get  github.com/cloudflare/cfssl/cmd/cfssl
go get  github.com/cloudflare/cfssl/cmd/cfssljson
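go get drops both binaries into $GOBIN (set in /etc/profile above). A quick check that they built and are reachable; the exact version string depends on the cfssl revision fetched:

ls $GOBIN/cfssl $GOBIN/cfssljson
cfssl version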

Deploying etcd

# etcd is deployed from the official binaries here: building it from source pulls in dependencies that are hard to reach from behind the GFW, so the environment setup is painful. Run these steps on the etcd node.
wget https://github.com/etcd-io/etcd/releases/download/v3.4.0/etcd-v3.4.0-linux-amd64.tar.gz
mkdir -p /apps/etcd/{bin,conf,ssl,data}
# Data directory
mkdir -p /apps/etcd/data/default.etcd
# Create the etcd user
useradd etcd -s /sbin/nologin -M
# Unpack etcd
tar -xvf etcd-v3.4.0-linux-amd64.tar.gz
# Copy the binaries into the working directory
cd etcd-v3.4.0-linux-amd64/
cp -pdr etcd etcdctl /apps/etcd/bin/
# etcd certificates (run on win10 on Ubuntu)
 mkdir -p /apps/work/k8s/cfssl/ && \
cat << EOF | tee /apps/work/k8s/cfssl/ca-config.json
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
# Create the etcd CA CSR config
mkdir -p /apps/work/k8s/cfssl/etcd
cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd-ca-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "GuangZhou",
"O": "cluster",
"OU": "cluster"
}
]
}
EOF
# Generate the etcd CA certificate and private key
mkdir -p /apps/work/k8s/cfssl/pki/etcd
cfssl gencert -initca /apps/work/k8s/cfssl/etcd/etcd-ca-csr.json | cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd-ca
# Create the etcd server certificate config
export ETCD_SERVER_IPS=" \
\"192.168.30.31\" \
" && \
export ETCD_SERVER_HOSTNAMES=" \
\"etcd\" \
" && \
cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd_server.json
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
${ETCD_SERVER_IPS},
${ETCD_SERVER_HOSTNAMES}
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "GuangZhou",
"O": "cluster",
"OU": "cluster"
}
]
}
EOF
# Generate the etcd server certificate and private key
cfssl gencert \
-ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
-ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
-config=/apps/work/k8s/cfssl/ca-config.json \
-profile=kubernetes \
/apps/work/k8s/cfssl/etcd/etcd_server.json | \
cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_server
# Create the etcd member certificate config
export ETCD_MEMBER_1_IP=" \
    \"192.168.30.31\" \
" && \
export ETCD_MEMBER_1_HOSTNAMES="etcd" && \
cat << EOF | tee /apps/work/k8s/cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    ${ETCD_MEMBER_1_IP},
    "${ETCD_MEMBER_1_HOSTNAMES}"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
# Generate the etcd member 1 certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/etcd/${ETCD_MEMBER_1_HOSTNAMES}.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_member_${ETCD_MEMBER_1_HOSTNAMES}
# Create the etcd client certificate config
cat << EOF | tee /apps/work/k8s/cfssl/etcd/etcd_client.json
{
"CN": "client",
"hosts": [""], 
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "GuangZhou",
"O": "cluster",
"OU": "cluster"
}
]
}
EOF
# Generate the etcd client certificate and private key

cfssl gencert \
-ca=/apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem \
-ca-key=/apps/work/k8s/cfssl/pki/etcd/etcd-ca-key.pem \
-config=/apps/work/k8s/cfssl/ca-config.json \
-profile=kubernetes \
/apps/work/k8s/cfssl/etcd/etcd_client.json | \
cfssljson -bare /apps/work/k8s/cfssl/pki/etcd/etcd_client
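Optionally, before distributing the certificates, the SANs and validity dates can be confirmed with plain openssl x509 inspection (not part of the original flow):

openssl x509 -in /apps/work/k8s/cfssl/pki/etcd/etcd_server.pem -noout -subject -dates
openssl x509 -in /apps/work/k8s/cfssl/pki/etcd/etcd_server.pem -noout -text | grep -A1 'Subject Alternative Name'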
# Copy the certificates to the etcd node
scp -r /apps/work/k8s/cfssl/pki/etcd/*  192.168.30.31:/apps/etcd/ssl
# Create the etcd options file (run on the etcd server)
vi /apps/etcd/conf/etcd
ETCD_OPTS="--name=etcd \
           --data-dir=/apps/etcd/data/default.etcd \
           --listen-peer-urls=https://192.168.30.31:2380 \
           --listen-client-urls=https://192.168.30.31:2379,https://127.0.0.1:2379 \
           --advertise-client-urls=https://192.168.30.31:2379 \
           --initial-advertise-peer-urls=https://192.168.30.31:2380 \
           --initial-cluster=etcd=https://192.168.30.31:2380 \
           --initial-cluster-token=etcd-cluster \
           --initial-cluster-state=new \
           --heartbeat-interval=6000 \
           --election-timeout=30000 \
           --snapshot-count=5000 \
           --auto-compaction-retention=1 \
           --max-request-bytes=33554432 \
           --quota-backend-bytes=17179869184 \
           --trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem \
           --cert-file=/apps/etcd/ssl/etcd_server.pem \
           --key-file=/apps/etcd/ssl/etcd_server-key.pem \
           --peer-cert-file=/apps/etcd/ssl/etcd_member_etcd.pem \
           --peer-key-file=/apps/etcd/ssl/etcd_member_etcd-key.pem \
           --peer-client-cert-auth \
           --peer-trusted-ca-file=/apps/etcd/ssl/etcd-ca.pem"
# Create the etcd systemd service file
vi /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
User=etcd
Group=etcd
EnvironmentFile=-/apps/etcd/conf/etcd
ExecStart=/apps/etcd/bin/etcd $ETCD_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target

# Give the etcd user ownership of the etcd directory
chown -R etcd.etcd /apps/etcd
# Start etcd
systemctl start etcd
# Enable start on boot
systemctl enable etcd
# Set environment variables
vi /etc/profile
export ETCDCTL_API=3
export ENDPOINTS=https://192.168.30.31:2379
# Apply the changes
source /etc/profile
vim ~/.bashrc
alias etcdctl='/apps/etcd/bin/etcdctl --endpoints=${ENDPOINTS} --cacert=/apps/etcd/ssl/etcd-ca.pem'
source ~/.bashrc
# Check cluster health
etcdctl endpoint health
[root@etcd ~]# etcdctl endpoint health
https://192.168.30.31:2379 is healthy: successfully committed proposal: took = 16.707114ms
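As an extra smoke test (optional; it relies on the etcdctl alias defined above), write a key, read it back, and delete it:

etcdctl put /test/hello world
etcdctl get /test/hello
etcdctl del /test/hello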

Deploying kube-apiserver

# Run on win10 on Ubuntu
# Create the Kubernetes CA CSR config
mkdir -p /apps/work/k8s/cfssl/k8s
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "GuangZhou",
"O": "cluster",
"OU": "cluster"
}
]
}
EOF
# Generate the Kubernetes CA certificate and private key

mkdir -p /apps/work/k8s/cfssl/pki/k8s
cfssl gencert -initca /apps/work/k8s/cfssl/k8s/k8s-ca-csr.json | \
cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s-ca
# Create the Kubernetes API server certificate config
export K8S_APISERVER_VIP=" \
\"192.168.30.32\" \
" && \
export K8S_APISERVER_SERVICE_CLUSTER_IP="10.66.0.1" && \
export K8S_APISERVER_HOSTNAME="api.k8s.cluster.local" && \
export K8S_CLUSTER_DOMAIN_SHORTNAME="cluster" && \
export K8S_CLUSTER_DOMAIN_FULLNAME="cluster.local" && \
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver.json
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
${K8S_APISERVER_VIP},
"${K8S_APISERVER_SERVICE_CLUSTER_IP}", 
"${K8S_APISERVER_HOSTNAME}",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_SHORTNAME}",
"kubernetes.default.svc.${K8S_CLUSTER_DOMAIN_FULLNAME}" 
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "GuangDong",
"L": "GuangZhou",
"O": "cluster",
"OU": "cluster"
}
]
}
EOF
# Generate the Kubernetes API server certificate and private key

cfssl gencert \
-ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
-ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
-config=/apps/work/k8s/cfssl/ca-config.json \
-profile=kubernetes \
/apps/work/k8s/cfssl/k8s/k8s_apiserver.json | \
cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_server
# Create the Kubernetes aggregator (webhook) certificate config
cat << EOF | tee /apps/work/k8s/cfssl/k8s/aggregator.json
{
  "CN": "aggregator",
  "hosts": [""], 
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF

# Generate the Kubernetes aggregator (webhook) certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/k8s/aggregator.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/aggregator
# Create the directories on the remote server
mkdir -p /apps/kubernetes/{bin,conf,config,kubelet-plugins,log,ssl}
# Distribute the certificates to 192.168.30.32
scp -r /apps/work/k8s/cfssl/pki/k8s 192.168.30.32:/apps/kubernetes/ssl/k8s
# Copy the etcd_client certificates (create the target directory first so scp has somewhere to land)
ssh 192.168.30.32 "mkdir -p /apps/kubernetes/ssl/etcd"
scp -r /apps/work/k8s/cfssl/pki/etcd/etcd_client* 192.168.30.32:/apps/kubernetes/ssl/etcd
scp -r /apps/work/k8s/cfssl/pki/etcd/etcd-ca.pem 192.168.30.32:/apps/kubernetes/ssl/etcd
# Distribute the Kubernetes binaries to the remote server (copy all of them over)
scp -r /mnt/e/work/k8s/bin/* 192.168.30.32:/apps/kubernetes/bin
# Run on the remote server 192.168.30.32
# Create the k8s user
useradd k8s -s /sbin/nologin -M
# Generate encryption-config.yaml
cd /apps/kubernetes/config
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
 cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
# Generate the kube-apiserver options file
cd /apps/kubernetes/conf
vi kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=false \
        --bind-address=192.168.30.32 \
        --advertise-address=192.168.30.32 \
        --secure-port=5443 \
        --insecure-port=0 \
        --service-cluster-ip-range=10.66.0.0/16 \
        --service-node-port-range=30000-65000 \
        --etcd-cafile=/apps/kubernetes/ssl/etcd/etcd-ca.pem \
        --etcd-certfile=/apps/kubernetes/ssl/etcd/etcd_client.pem \
        --etcd-keyfile=/apps/kubernetes/ssl/etcd/etcd_client-key.pem \
        --etcd-prefix=/registry \
        --etcd-servers=https://192.168.30.31:2379 \
        --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
        --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_server.pem \
        --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
        --kubelet-client-certificate=/apps/kubernetes/ssl/k8s/k8s_server.pem \
        --kubelet-client-key=/apps/kubernetes/ssl/k8s/k8s_server-key.pem \
        --service-account-key-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
        --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
        --proxy-client-cert-file=/apps/kubernetes/ssl/k8s/aggregator.pem \
        --proxy-client-key-file=/apps/kubernetes/ssl/k8s/aggregator-key.pem \
        --requestheader-allowed-names=aggregator \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-username-headers=X-Remote-User \
        --enable-aggregator-routing=true \
        --anonymous-auth=false \
        --allow-privileged=true \
        --experimental-encryption-provider-config=/apps/kubernetes/config/encryption-config.yaml \
        --enable-admission-plugins=AlwaysPullImages,DefaultStorageClass,DefaultTolerationSeconds,LimitRanger,NamespaceExists,NamespaceLifecycle,NodeRestriction,OwnerReferencesPermissionEnforcement,PodNodeSelector,PersistentVolumeClaimResize,PodPreset,PodTolerationRestriction,ResourceQuota,ServiceAccount,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook \
        --disable-admission-plugins=DenyEscalatingExec,ExtendedResourceToleration,ImagePolicyWebhook,LimitPodHardAntiAffinityTopology,NamespaceAutoProvision,Priority,EventRateLimit,PodSecurityPolicy \
        --cors-allowed-origins=.* \
        --enable-swagger-ui \
        --runtime-config=api/all=true \
        --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
        --authorization-mode=Node,RBAC \
        --apiserver-count=1 \
        --audit-log-maxage=30 \
        --audit-log-maxbackup=3 \
        --audit-log-maxsize=100 \
        --kubelet-https \
        --event-ttl=1h \
        --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
        --enable-bootstrap-token-auth=true \
        --audit-log-path=/apps/kubernetes/log/api-server-audit.log \
        --alsologtostderr=true \
        --log-dir=/apps/kubernetes/log \
        --v=2 \
        --endpoint-reconciler-type=lease \
        --max-mutating-requests-inflight=100 \
        --max-requests-inflight=500 \
        --target-ram-mb=6000"
# kube-apiserver systemd unit file
vi /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
Type=notify
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-apiserver
ExecStart=/apps/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
User=k8s
[Install]
WantedBy=multi-user.target
# Give the k8s user ownership of /apps/kubernetes
chown -R k8s.k8s /apps/kubernetes
# Start kube-apiserver
systemctl start kube-apiserver
# Enable start on boot
systemctl enable kube-apiserver
# Create the admin client certificate (run on win10 on Ubuntu)
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_apiserver_admin.json
{
  "CN": "admin",
  "hosts": [""], 
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

cfssl gencert -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
        /apps/work/k8s/cfssl/k8s/k8s_apiserver_admin.json | \
        cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin

# Create admin.kubeconfig for administrator access
export KUBE_APISERVER="https://192.168.30.32:5443"
kubectl config set-cluster kubernetes \
--certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
--embed-certs=true  \
--server=${KUBE_APISERVER} \
--kubeconfig=admin.kubeconfig

 kubectl config set-credentials admin \
 --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin.pem \
 --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_apiserver_admin-key.pem \
 --embed-certs=true \
 --kubeconfig=admin.kubeconfig

kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin \
--namespace=kube-system \
--kubeconfig=admin.kubeconfig

kubectl config use-context kubernetes --kubeconfig=admin.kubeconfig
# Copy the kubeconfig into the current user's home
mkdir -p ~/.kube && cp admin.kubeconfig ~/.kube/config
# Verify kube-apiserver is healthy
kubectl cluster-info 
[root@master ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.30.32:5443
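Since encryption at rest is enabled, it can also be verified end to end. This is an optional sketch: it assumes the etcdctl alias from the etcd section and uses a throwaway secret named enc-test. The raw value stored in etcd should start with the ciphertext prefix k8s:enc:aescbc:v1:key1:, not plaintext.

kubectl create secret generic enc-test -n kube-system --from-literal=foo=bar
# On the etcd node:
etcdctl get /registry/secrets/kube-system/enc-test --print-value-only | head -c 40; echo
# Clean up
kubectl delete secret enc-test -n kube-system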

Deploying kube-scheduler

# Generate the certificate kube-scheduler uses to access kube-apiserver (run on win10 on Ubuntu)
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_scheduler.json
{
  "CN": "system:kube-scheduler",
  "hosts": [""], 
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-scheduler",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the Kubernetes scheduler certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/k8s/k8s_scheduler.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_scheduler
# Create kube_scheduler.kubeconfig
kubectl config set-cluster kubernetes \
    --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kube_scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
    --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_scheduler.pem \
    --embed-certs=true \
    --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_scheduler-key.pem \
    --kubeconfig=kube_scheduler.kubeconfig
kubectl config set-context kubernetes \
    --cluster=kubernetes \
    --user=system:kube-scheduler \
    --kubeconfig=kube_scheduler.kubeconfig

kubectl config use-context kubernetes --kubeconfig=kube_scheduler.kubeconfig
# Copy kube_scheduler.kubeconfig to the remote server
scp kube_scheduler.kubeconfig 192.168.30.32:/apps/kubernetes/config
# Run on the remote server
cd /apps/kubernetes/conf
# Create the kube-scheduler options file
vi kube-scheduler
KUBE_SCHEDULER_OPTS=" \
                   --logtostderr=false \
                   --address=0.0.0.0 \
                   --leader-elect=true \
                   --kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --authentication-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --authorization-kubeconfig=/apps/kubernetes/config/kube_scheduler.kubeconfig \
                   --alsologtostderr=true \
                   --kube-api-qps=100 \
                   --kube-api-burst=100 \
                   --log-dir=/apps/kubernetes/log \
                   --v=2"
# Create the kube-scheduler systemd unit file

vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-scheduler
ExecStart=/apps/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
User=k8s
[Install]
WantedBy=multi-user.target
# Give the k8s user ownership of the new files
chown -R k8s.k8s /apps/kubernetes
# Start kube-scheduler
systemctl start kube-scheduler
# Enable start on boot
systemctl enable kube-scheduler
# Verify status
kubectl get cs
[root@master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
controller-manager   Healthy   ok
scheduler            Healthy   ok
etcd-0               Healthy   {"health":"true"}

Deploying kube-controller-manager

# Generate the certificate kube-controller-manager uses to access kube-apiserver (run on win10 on Ubuntu)
cat << EOF | tee /apps/work/k8s/cfssl/k8s/k8s_controller_manager.json
{
  "CN": "system:kube-controller-manager",
  "hosts": [""], 
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:kube-controller-manager",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the Kubernetes controller manager certificate and private key
cfssl gencert \
    -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
    -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes \
    /apps/work/k8s/cfssl/k8s/k8s_controller_manager.json | \
    cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager
# Create kube_controller_manager.kubeconfig
kubectl config set-cluster kubernetes \
   --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
   --embed-certs=true \
   --server=${KUBE_APISERVER} \
   --kubeconfig=kube_controller_manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
   --client-certificate=/apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager.pem \
   --embed-certs=true \
   --client-key=/apps/work/k8s/cfssl/pki/k8s/k8s_controller_manager-key.pem \
   --kubeconfig=kube_controller_manager.kubeconfig

kubectl config set-context kubernetes \
   --cluster=kubernetes \
   --user=system:kube-controller-manager \
   --kubeconfig=kube_controller_manager.kubeconfig

kubectl config use-context kubernetes --kubeconfig=kube_controller_manager.kubeconfig
# Copy kube_controller_manager.kubeconfig to the remote server
scp kube_controller_manager.kubeconfig 192.168.30.32:/apps/kubernetes/config
# Run on the remote server
cd /apps/kubernetes/conf
vi kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
 --leader-elect=true \
 --address=0.0.0.0 \
 --service-cluster-ip-range=10.66.0.0/16 \
 --cluster-cidr=10.67.0.0/16 \
 --node-cidr-mask-size=24 \
 --cluster-name=kubernetes \
 --allocate-node-cidrs=true \
 --kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --authentication-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --authorization-kubeconfig=/apps/kubernetes/config/kube_controller_manager.kubeconfig \
 --use-service-account-credentials=true \
 --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --requestheader-client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --node-monitor-grace-period=40s \
 --node-monitor-period=5s \
 --pod-eviction-timeout=5m0s \
 --terminated-pod-gc-threshold=50 \
 --alsologtostderr=true \
 --cluster-signing-cert-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --cluster-signing-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem  \
 --deployment-controller-sync-period=10s \
 --experimental-cluster-signing-duration=86700h0m0s \
 --enable-garbage-collector=true \
 --root-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
 --service-account-private-key-file=/apps/kubernetes/ssl/k8s/k8s-ca-key.pem \
 --feature-gates=RotateKubeletServerCertificate=true,RotateKubeletClientCertificate=true \
 --controllers=*,bootstrapsigner,tokencleaner \
 --horizontal-pod-autoscaler-use-rest-clients=true \
 --horizontal-pod-autoscaler-sync-period=10s \
 --flex-volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume \
 --tls-cert-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager.pem \
 --tls-private-key-file=/apps/kubernetes/ssl/k8s/k8s_controller_manager-key.pem \
 --kube-api-qps=100 \
 --kube-api-burst=100 \
 --log-dir=/apps/kubernetes/log \
 --v=2"
# Create the kube-controller-manager systemd unit file
vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kube-controller-manager
ExecStart=/apps/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
User=k8s
[Install]
WantedBy=multi-user.target
# Give the k8s user ownership of the new files
chown -R k8s.k8s /apps/kubernetes
# Start kube-controller-manager
systemctl start kube-controller-manager
# Enable start on boot
systemctl enable kube-controller-manager
# Verify status
[root@master ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok
controller-manager   Healthy   ok
etcd-0               Healthy   {"health":"true"}
# Authorize kube-controller-manager, kubelet, and kube-scheduler to access the kube-apiserver
# Grant permissions on the Kubernetes API
kubectl create clusterrolebinding controller-node-clusterrolebing --clusterrole=system:kube-controller-manager  --user=system:kube-controller-manager
kubectl create clusterrolebinding scheduler-node-clusterrolebing  --clusterrole=system:kube-scheduler --user=system:kube-scheduler
kubectl create clusterrolebinding controller-manager:system:auth-delegator --user system:kube-controller-manager --clusterrole system:auth-delegator
# Grant the kubernetes certificate access to the kubelet API
kubectl create clusterrolebinding --user system:serviceaccount:kube-system:default kube-system-cluster-admin --clusterrole cluster-admin
kubectl create clusterrolebinding kubelet-node-clusterbinding --clusterrole=system:node --group=system:nodes
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
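A quick way to confirm the bindings landed (the grep pattern simply matches the names created above; the can-i probe works because the built-in system:kube-scheduler role may list nodes):

kubectl get clusterrolebindings | grep -E 'clusterrolebing|auth-delegator|kubelet'
kubectl auth can-i list nodes --as=system:kube-scheduler
# Expected output: yes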

Deploying Docker

# Run on the remote node
# Use the Aliyun mirror
cat > /etc/yum.repos.d/docker-ce.repo << EOF
[docker-ce-stable]
name=Docker CE Stable - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge]
name=Docker CE Edge - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-debuginfo]
name=Docker CE Edge - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-edge-source]
name=Docker CE Edge - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/edge
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test]
name=Docker CE Test - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly]
name=Docker CE Nightly - \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo \$basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/debug-\$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg

[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF
# Install Docker dependencies
yum install -y python-pip python-devel yum-utils device-mapper-persistent-data lvm2
# Install Docker
yum install -y docker-ce
# Modify the Docker startup configuration
vi /lib/systemd/system/docker.service
# Change the ExecStart= line to:
ExecStart=/usr/bin/dockerd -H fd:// --graph /apps/docker -H unix:///var/run/docker.sock  --max-concurrent-downloads=20 --log-opt max-size=200M --log-opt max-file=10 --default-ulimit nofile=1024000 --default-ulimit nproc=1024000
# Reload the systemd configuration
systemctl daemon-reload
# Restart Docker
systemctl restart docker
# Enable start on boot
systemctl enable docker
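To confirm Docker picked up the custom graph directory, check docker info (field names as printed by Docker CE):

docker info | grep -E 'Server Version|Docker Root Dir'
# Docker Root Dir: /apps/docker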

Installing the kubelet dependencies

# Run on the remote node
# Install the CNI plugins
mkdir -p /apps/cni/bin
cd  /apps/cni/bin
wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz
tar -xvf cni-plugins-linux-amd64-v0.8.2.tgz
rm -f cni-plugins-linux-amd64-v0.8.2.tgz
# Create the /etc/cni/net.d directory
mkdir -p /etc/cni/net.d
vi  /etc/cni/net.d/10-kuberouter.conflist
{
   "cniVersion":"0.3.0",
   "name":"mynet",
   "plugins":[
      {
         "name":"kubernetes",
         "type":"bridge",
         "bridge":"kube-bridge",
         "isDefaultGateway":true,
         "ipam":{
            "type":"host-local"
         }
      },
      {
         "type":"portmap",
         "capabilities":{
            "snat":true,
            "portMappings":true
         }
      }
   ]
}

# Install lxcfs
yum install -y git automake libtool fuse-devel
git clone https://github.com/lxc/lxcfs.git
cd lxcfs/
./bootstrap.sh
./configure
make
make install 
# Create the directory
mkdir -p /var/lib/lxcfs/
# Create the lxcfs systemd service file
vi /usr/lib/systemd/system/lxcfs.service
[Unit]
Description=FUSE filesystem for LXC
ConditionVirtualization=!container
Before=lxc.service
Documentation=man:lxcfs(1)
[Service]
ExecStart=/usr/local/bin/lxcfs /var/lib/lxcfs/
KillMode=process
Restart=on-failure
ExecStopPost=-/bin/fusermount -u /var/lib/lxcfs
Delegate=yes
[Install]
WantedBy=multi-user.target
# Start lxcfs
systemctl start lxcfs 
# Enable start on boot
systemctl enable lxcfs
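lxcfs should now be exporting virtualized /proc and /cgroup trees under its mount point; a quick look confirms it (directory names as created by lxcfs):

ls /var/lib/lxcfs/
# cgroup  proc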
# Install the kubelet dependencies
yum install -y  epel-release
yum install -y   yum-utils  ipvsadm  telnet  wget  net-tools  conntrack  ipset  jq  iptables  curl  sysstat  libseccomp  socat  nfs-utils  fuse  fuse-devel 

Deploying kubelet

# Run on win10 on Ubuntu
# Generate a bootstrap token
echo "$(head -c 6 /dev/urandom | md5sum | head -c 6)"."$(head -c 16 /dev/urandom | md5sum | head -c 16)"
9dad00.2ac445bf1cc5e9c2
vi bootstrap.secret.yaml
apiVersion: v1
kind: Secret
metadata:
  # Name MUST be of form "bootstrap-token-<token id>"
  name: bootstrap-token-9dad00
  namespace: kube-system

# Type MUST be 'bootstrap.kubernetes.io/token'
type: bootstrap.kubernetes.io/token
stringData:
  # Human readable description. Optional.
  description: "The default bootstrap token generated by 'kubelet '."

  # Token ID and secret. Required.
  token-id: 9dad00
  token-secret: 2ac445bf1cc5e9c2

  # Allowed usages.
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"

  # Extra groups to authenticate the token as. Must start with "system:bootstrappers:"
  auth-extra-groups: system:bootstrappers:worker,system:bootstrappers:ingress
# Create the Kubernetes resource
kubectl create -f bootstrap.secret.yaml
# Create bootstrap.clusterrole.yaml
vi bootstrap.clusterrole.yaml
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeserver
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]

kubectl create -f bootstrap.clusterrole.yaml
# Create apiserver-to-kubelet.yaml
vi apiserver-to-kubelet.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kubernetes-to-kubelet
rules:
  - apiGroups:
      - ""
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kubernetes
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kubernetes-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes
kubectl create -f apiserver-to-kubelet.yaml
# List the created tokens
kubeadm token list
# Allow users in the system:bootstrappers group to create CSRs
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers
# Auto-approve the first CSR that system:bootstrappers group members submit during TLS bootstrapping
kubectl create clusterrolebinding node-client-auto-approve-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:nodeclient --group=system:bootstrappers
# Auto-approve CSRs from the system:nodes group renewing the kubelet client certificate used to talk to the apiserver
kubectl create clusterrolebinding node-client-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes

# Auto-approve CSRs from the system:nodes group renewing the kubelet serving certificate for the port 10250 API
kubectl create clusterrolebinding node-server-auto-renew-crt --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeserver --group=system:nodes
# Create bootstrap.kubeconfig
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
kubectl config set-credentials system:bootstrap:9dad00 \
  --token=9dad00.2ac445bf1cc5e9c2 \
  --kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=system:bootstrap:9dad00 \
  --kubeconfig=bootstrap.kubeconfig
# Set the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
# Distribute bootstrap.kubeconfig to the remote node
scp bootstrap.kubeconfig 192.168.30.32:/apps/kubernetes/conf
# Create the kubelet options file (run on the remote node)
cd /apps/kubernetes/conf
vi kubelet
KUBELET_OPTS="--bootstrap-kubeconfig=/apps/kubernetes/conf/bootstrap.kubeconfig \
              --fail-swap-on=false \
              --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/apps/cni/bin \
              --kubeconfig=/apps/kubernetes/conf/kubelet.kubeconfig \
              --address=192.168.30.32 \
              --node-ip=192.168.30.32 \
              --hostname-override=master \
              --cluster-dns=10.66.0.2 \
              --cluster-domain=cluster.local \
              --authorization-mode=Webhook \
              --authentication-token-webhook=true \
              --client-ca-file=/apps/kubernetes/ssl/k8s/k8s-ca.pem \
              --rotate-certificates=true \
              --cgroup-driver=cgroupfs \
              --healthz-port=10248 \
              --healthz-bind-address=192.168.30.32 \
              --cert-dir=/apps/kubernetes/ssl \
              --feature-gates=RotateKubeletClientCertificate=true,RotateKubeletServerCertificate=true \
              --node-labels=node-role.kubernetes.io/k8s-node=true \
              --serialize-image-pulls=false \
              --enforce-node-allocatable=pods,kube-reserved,system-reserved \
              --pod-manifest-path=/apps/work/kubernetes/manifests \
              --runtime-cgroups=/systemd/system.slice/kubelet.service \
              --kube-reserved-cgroup=/systemd/system.slice/kubelet.service \
              --system-reserved-cgroup=/systemd/system.slice \
              --root-dir=/apps/work/kubernetes/kubelet \
              --log-dir=/apps/kubernetes/log \
              --alsologtostderr=true \
              --logtostderr=false \
              --anonymous-auth=true \
              --image-gc-high-threshold=70 \
              --image-gc-low-threshold=50 \
              --kube-reserved=cpu=500m,memory=512Mi,ephemeral-storage=1Gi \
              --system-reserved=cpu=1000m,memory=1024Mi,ephemeral-storage=1Gi \
              --eviction-hard=memory.available<500Mi,nodefs.available<10% \
              --serialize-image-pulls=false \
              --sync-frequency=30s \
              --resolv-conf=/etc/resolv.conf \
              --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0 \
              --image-pull-progress-deadline=30s \
              --v=2 \
              --event-burst=30 \
              --event-qps=15 \
              --kube-api-burst=30 \
              --kube-api-qps=15 \
              --max-pods=200 \
              --pods-per-core=10 \
              --read-only-port=0 \
              --allowed-unsafe-sysctls 'kernel.msg*,kernel.shm*,kernel.sem,fs.mqueue.*,net.*' \
              --volume-plugin-dir=/apps/kubernetes/kubelet-plugins/volume"
# Create the kubelet systemd service file
vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
LimitNOFILE=1024000
LimitNPROC=1024000
LimitCORE=infinity
LimitMEMLOCK=infinity
EnvironmentFile=-/apps/kubernetes/conf/kubelet
ExecStart=/apps/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
# Kubernetes working directories
mkdir -p /apps/work/kubernetes/{manifests,kubelet}
# Start kubelet
systemctl start kubelet 
# Enable start on boot
systemctl enable kubelet
# Check that the certificates were issued
cd /apps/kubernetes/ssl/
[root@master ssl]# ll
total 12
drwxr-xr-x 2 k8s k8s   75 Jul  4 15:06 etcd
drwxr-xr-x 2 k8s k8s  310 Sep  3 11:18 k8s
-rw------- 1 k8s k8s 1277 Sep  3 10:25 kubelet-client-2019-09-03-10-25-10.pem
lrwxrwxrwx 1 k8s k8s   59 Sep  3 10:25 kubelet-client-current.pem -> /apps/kubernetes/ssl/kubelet-client-2019-09-03-10-25-10.pem
-rw-r--r-- 1 k8s k8s 2153 Sep  3 10:25 kubelet.crt
-rw------- 1 k8s k8s 1675 Sep  3 10:25 kubelet.key
# Check that the node is Ready
kubectl get node
[root@master ~]# kubectl get node
NAME     STATUS   ROLES      AGE   VERSION
master   Ready    k8s-node   27h   v1.15.3
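The CSR objects can also be inspected to confirm the auto-approval bindings worked; names and ages below are illustrative:

kubectl get csr
# NAME        AGE   REQUESTOR                 CONDITION
# csr-xxxxx   27h   system:bootstrap:9dad00   Approved,Issued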

Deploying kube-router

# Run on win10 on Ubuntu
# Create the certificate kube-router uses to access kube-apiserver
cat << EOF | tee /apps/work/k8s/cfssl/k8s/kube-router.json
{
  "CN": "kube-router",
  "hosts": [""], 
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "system:masters",
      "OU": "Kubernetes-manual"
    }
  ]
}
EOF

# Generate the kube-router certificate and private key
cfssl gencert \
        -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
        -config=/apps/work/k8s/cfssl/ca-config.json \
        -profile=kubernetes \
         /apps/work/k8s/cfssl/k8s/kube-router.json | \
         cfssljson -bare /apps/work/k8s/cfssl/pki/k8s/kube-router
# Set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubeconfig.conf
# Set client authentication parameters
kubectl config set-credentials kube-router \
  --client-certificate=/apps/work/k8s/cfssl/pki/k8s/kube-router.pem \
  --client-key=/apps/work/k8s/cfssl/pki/k8s/kube-router-key.pem \
  --embed-certs=true \
  --kubeconfig=kubeconfig.conf
# Set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-router \
  --kubeconfig=kubeconfig.conf
# Set the default context
kubectl config use-context default --kubeconfig=kubeconfig.conf
# Create the configmap (named kube-proxy) that kube-router mounts as its kubeconfig
kubectl create configmap "kube-proxy" --from-file=kubeconfig.conf -n kube-system
# Create kubeadm-kuberouter-all-features-hostport.yaml
vi kubeadm-kuberouter-all-features-hostport.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    tier: node
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
       "cniVersion":"0.3.0",
       "name":"mynet",
       "plugins":[
          {
             "name":"kubernetes",
             "type":"bridge",
             "bridge":"kube-bridge",
             "isDefaultGateway":true,
             "ipam":{
                "type":"host-local"
             }
          },
          {
             "type":"portmap",
             "capabilities":{
                "snat":true,
                "portMappings":true
             }
          }
       ]
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
    tier: node
  name: kube-router
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-router
  template:
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      serviceAccount: kube-router
      containers:
      - name: kube-router
        image: docker.io/cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        - --run-router=true 
        - --run-firewall=true 
        - --run-service-proxy=true 
        - --advertise-cluster-ip=true 
        - --advertise-loadbalancer-ip=true 
        - --advertise-pod-cidr=true 
        - --advertise-external-ip=true 
        - --cluster-asn=64512 
        - --metrics-path=/metrics 
        - --metrics-port=20241 
        - --enable-cni=true 
        - --enable-ibgp=true 
        - --enable-overlay=true 
        - --nodeport-bindon-all-ip=true 
        - --nodes-full-mesh=true 
        - --enable-pod-egress=true 
        - --cluster-cidr=10.67.0.0/16 
        - --v=2
        - --kubeconfig=/var/lib/kube-router/kubeconfig
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KUBE_ROUTER_CNI_CONF_FILE
          value: /etc/cni/net.d/10-kuberouter.conflist
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kubeconfig
          mountPath: /var/lib/kube-router
          readOnly: true
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            if [ -f /etc/cni/net.d/*.conf ]; then
              rm -f /etc/cni/net.d/*.conf;
            fi;
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json ${TMP};
            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
          fi
        volumeMounts:
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
        - name: kube-router-cfg
          mountPath: /etc/kube-router
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node.kubernetes.io/not-ready
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/ingress
        operator: Equal
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
      - name: kubeconfig
        configMap:
          name: kube-proxy
          items:
          - key: kubeconfig.conf
            path: kubeconfig
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-router
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
  namespace: kube-system
rules:
  - apiGroups:
    - ""
    resources:
      - namespaces
      - pods
      - services
      - nodes
      - endpoints
    verbs:
      - list
      - get
      - watch
  - apiGroups:
    - "networking.k8s.io"
    resources:
      - networkpolicies
    verbs:
      - list
      - get
      - watch
  - apiGroups:
    - extensions
    resources:
      - networkpolicies
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kube-router
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-router
subjects:
- kind: ServiceAccount
  name: kube-router
  namespace: kube-system
# Deploy kube-router
kubectl apply -f kubeadm-kuberouter-all-features-hostport.yaml
# Check that the pod was created
[root@master ~]# kubectl get pod -A | grep kube-router
kube-system            kube-router-5tmgw                           1/1     Running   0          21h
# On 192.168.30.32
cat /etc/cni/net.d/10-kuberouter.conflist
[root@master ssl]# cat /etc/cni/net.d/10-kuberouter.conflist
{"cniVersion":"0.3.0","name":"mynet","plugins":[{"bridge":"kube-bridge","ipam":{"subnet":"10.67.0.0/24","type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"},{"capabilities":{"portMappings":true,"snat":true},"type":"portmap"}]}
# The node has been allocated its Pod IP range
ip a| grep  kube
[root@master ssl]# ip a| grep  kube
4: kube-bridge: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default qlen 1000
    inet 10.67.0.1/24 brd 10.67.0.255 scope global kube-bridge
6: kube-dummy-if: <BROADCAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default
    inet 10.66.0.1/32 brd 10.66.0.1 scope link kube-dummy-if
    inet 10.66.0.2/32 brd 10.66.0.2 scope link kube-dummy-if
    inet 10.66.91.125/32 brd 10.66.91.125 scope link kube-dummy-if
    inet 10.66.86.10/32 brd 10.66.86.10 scope link kube-dummy-if
    inet 10.66.52.216/32 brd 10.66.52.216 scope link kube-dummy-if
# kube-router is working correctly
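kube-router also replaces kube-proxy and programs services into IPVS, so the service VIPs bound to kube-dummy-if should appear as IPVS virtual servers. ipvsadm was installed earlier with the kubelet dependencies:

ipvsadm -ln | head -n 12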

Deploying CoreDNS

# Run on win10 on Ubuntu
vi coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream /etc/resolv.conf
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns
        imagePullPolicy: Always
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.66.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
# Deploy CoreDNS
kubectl apply -f coredns.yaml
# Verify the service
[root@master ~]# kubectl get all -A | grep coredns
kube-system            pod/coredns-597b77445b-fhxvr                    1/1     Running   0          27h
kube-system            deployment.apps/coredns                     1/1     1            1           27h
kube-system            replicaset.apps/coredns-597b77445b                    1         1         1       27h
dig @10.66.0.2 www.baidu.com
[root@master ssl]# dig @10.66.0.2 www.baidu.com

; <<>> DiG 9.9.4-RedHat-9.9.4-74.el7_6.1 <<>> @10.66.0.2 www.baidu.com
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 40347
;; flags: qr rd ra; QUERY: 1, ANSWER: 3, AUTHORITY: 13, ADDITIONAL: 27

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;www.baidu.com.                 IN      A

;; ANSWER SECTION:
www.baidu.com.          30      IN      CNAME   www.a.shifen.com.
www.a.shifen.com.       30      IN      A       14.215.177.38
www.a.shifen.com.       30      IN      A       14.215.177.39

;; AUTHORITY SECTION:
com.                    30      IN      NS      h.gtld-servers.net.
com.                    30      IN      NS      m.gtld-servers.net.
com.                    30      IN      NS      g.gtld-servers.net.
com.                    30      IN      NS      d.gtld-servers.net.
com.                    30      IN      NS      a.gtld-servers.net.
com.                    30      IN      NS      j.gtld-servers.net.
com.                    30      IN      NS      c.gtld-servers.net.
com.                    30      IN      NS      l.gtld-servers.net.
com.                    30      IN      NS      b.gtld-servers.net.
com.                    30      IN      NS      f.gtld-servers.net.
com.                    30      IN      NS      k.gtld-servers.net.
com.                    30      IN      NS      i.gtld-servers.net.
com.                    30      IN      NS      e.gtld-servers.net.

;; ADDITIONAL SECTION:
e.gtld-servers.net.     30      IN      AAAA    2001:502:1ca1::30
a.gtld-servers.net.     30      IN      A       192.5.6.30
i.gtld-servers.net.     30      IN      AAAA    2001:503:39c1::30
c.gtld-servers.net.     30      IN      A       192.26.92.30
g.gtld-servers.net.     30      IN      AAAA    2001:503:eea3::30
m.gtld-servers.net.     30      IN      A       192.55.83.30
d.gtld-servers.net.     30      IN      A       192.31.80.30
a.gtld-servers.net.     30      IN      AAAA    2001:503:a83e::2:30
b.gtld-servers.net.     30      IN      A       192.33.14.30
b.gtld-servers.net.     30      IN      AAAA    2001:503:231d::2:30
i.gtld-servers.net.     30      IN      A       192.43.172.30
d.gtld-servers.net.     30      IN      AAAA    2001:500:856e::30
l.gtld-servers.net.     30      IN      A       192.41.162.30
h.gtld-servers.net.     30      IN      AAAA    2001:502:8cc::30
e.gtld-servers.net.     30      IN      A       192.12.94.30
l.gtld-servers.net.     30      IN      AAAA    2001:500:d937::30
k.gtld-servers.net.     30      IN      AAAA    2001:503:d2d::30
j.gtld-servers.net.     30      IN      AAAA    2001:502:7094::30
m.gtld-servers.net.     30      IN      AAAA    2001:501:b1f9::30
f.gtld-servers.net.     30      IN      A       192.35.51.30
g.gtld-servers.net.     30      IN      A       192.42.93.30
h.gtld-servers.net.     30      IN      A       192.54.112.30
j.gtld-servers.net.     30      IN      A       192.48.79.30
k.gtld-servers.net.     30      IN      A       192.52.178.30
c.gtld-servers.net.     30      IN      AAAA    2001:503:83eb::30
f.gtld-servers.net.     30      IN      AAAA    2001:503:d414::30

;; Query time: 6 msec
;; SERVER: 10.66.0.2#53(10.66.0.2)
;; WHEN: Wed Sep 04 14:17:05 CST 2019
;; MSG SIZE  rcvd: 897
dig @10.66.0.2 kube-dns.kube-system.svc.cluster.local
[root@master ssl]# dig @10.66.0.2 kube-dns.kube-system.svc.cluster.local

; <<>> DiG 9.9.4-RedHat-9.9.4-74.el7_6.1 <<>> @10.66.0.2 kube-dns.kube-system.svc.cluster.local
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 40471
;; flags: qr aa rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; WARNING: recursion requested but not available

;; OPT PSEUDOSECTION:
; EDNS: version: 0, flags:; udp: 4096
;; QUESTION SECTION:
;kube-dns.kube-system.svc.cluster.local.        IN A

;; ANSWER SECTION:
kube-dns.kube-system.svc.cluster.local. 5 IN A  10.66.0.2

;; Query time: 1 msec
;; SERVER: 10.66.0.2#53(10.66.0.2)
;; WHEN: Wed Sep 04 14:19:13 CST 2019
;; MSG SIZE  rcvd: 121
Name resolution works as expected.
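For an in-cluster test as well (optional; busybox:1.28 is pinned because nslookup is broken in newer busybox images):

kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default.svc.cluster.local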

Deploying Traefik with HTTPS

# Run on win10 on Ubuntu

# Obtain a certificate: use Let's Encrypt to request a free multi-domain certificate
# Rename the certificate files to:
tls.crt 
tls.key
# Create the secret
kubectl -n kube-system create secret generic tls-cert --from-file=tls.key --from-file=tls.crt
# Create the Traefik configuration
vi traefik.toml
defaultEntryPoints = ["http","https"]
[entryPoints]
  [entryPoints.http]
  address = ":80"
    [entryPoints.http.redirect]
      entryPoint = "https"
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]
      [[entryPoints.https.tls.certificates]]
      certFile = "/certs/tls.crt"
      keyFile = "/certs/tls.key"
# Generate the configmap
kubectl create configmap traefik-conf --from-file=traefik.toml -n kube-system
# Create the Traefik RBAC manifest
vi traefik-rbac.yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
    - extensions
    resources:
    - ingresses/status
    verbs:
    - update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik
subjects:
- kind: ServiceAccount
  name: traefik
  namespace: kube-system
# traefik-deployment-https.yaml
vi traefik-deployment-https.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: kube-system
  labels:
    k8s-app: traefik
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: traefik
  template:
    metadata:
      labels:
        k8s-app: traefik
        name: traefik
    spec:
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      volumes:
      - name: ssl
        secret:
          secretName: tls-cert
      - name: config
        configMap:
          name: traefik-conf
          defaultMode: 0644
          items:
          - key: traefik.toml
            path: traefik.toml 
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet      
      containers:
      - image: traefik
        name: traefik
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /certs         
          name: "ssl"
        - mountPath: /etc/traefik.toml
          subPath: traefik.toml
          name: "config"
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
        args:
        - --api
        - --web
        - --api.dashboard
        - --logLevel=INFO
        - --web.metrics
        - --metrics.prometheus
        - --web.metrics.prometheus
        - --kubernetes
        - --traefiklog
        - --traefiklog.format=json
        - --accesslog
        - --accesslog.format=json
        - --accessLog.fields.headers.defaultMode=redact
        - --insecureskipverify=true
        - --configFile=/etc/traefik.toml       
#      nodeSelector:
#        ingress: "yes"
#      tolerations:
#      - effect: NoSchedule
#        key: node-role.kubernetes.io/ingress
#        operator: Equal
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: traefik
  name: traefik
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik
  clusterIP: None
  ports:
    - protocol: TCP
      port: 80
      name: http
    - protocol: TCP
      port: 443
      name: https
    - protocol: TCP
      port: 8080
      name: admin
  type: ClusterIP
# or deploy as a DaemonSet instead, pinned to labeled ingress nodes
vi traefik-daemonset-https.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik
  namespace: kube-system
  labels:
    k8s-app: traefik
spec:
  selector:
    matchLabels:
      k8s-app: traefik
  template:
    metadata:
      labels:
        k8s-app: traefik
        name: traefik
    spec:
      serviceAccountName: traefik
      terminationGracePeriodSeconds: 60
      volumes:
      - name: ssl
        secret:
          secretName: tls-cert
      - name: config
        configMap:
          name: traefik-conf
          defaultMode: 0644
          items:
          - key: traefik.toml
            path: traefik.toml
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet      
      containers:
      - image: traefik
        name: traefik
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /certs         
          name: "ssl"
        - mountPath: /etc/traefik.toml
          subPath: traefik.toml
          name: "config"
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --web
        - --api.dashboard
        - --logLevel=INFO
        - --web.metrics
        - --metrics.prometheus
        - --web.metrics.prometheus
        - --kubernetes
        - --traefiklog
        - --traefiklog.format=json
        - --accesslog
        - --accesslog.format=json
        - --accessLog.fields.headers.defaultMode=redact
        - --insecureskipverify=true
        - --configFile=/etc/traefik.toml       
      nodeSelector:
        ingress: "yes"
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/ingress
        operator: Equal
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: traefik
  name: traefik
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik
  clusterIP: None
  ports:
    - protocol: TCP
      port: 80
      name: http
    - protocol: TCP
      port: 443
      name: https
    - protocol: TCP
      port: 8080
      name: admin
  type: ClusterIP
# create the traefik-dashboard ingress
vi traefik-dashboard.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: traefik-dashboard
  namespace: kube-system
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
spec:
  rules:
  - host: trae.xxx.com
    http:
      paths:
        - backend:
            serviceName: traefik
            servicePort: 8080
  tls:
   - secretName: tls-cert
# create the resources
kubectl apply -f traefik-deployment-https.yaml
kubectl apply -f traefik-rbac.yaml
kubectl apply -f traefik-dashboard.yaml
Add a hosts entry and browse to trae.xxx.com
If the dashboard loads normally, traefik is working
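A quick smoke test from the command line (a sketch; assumes the hosts entry points at a node running traefik):
# the pod should be Running
kubectl -n kube-system get pods -l k8s-app=traefik -o wide
# with the http-to-https redirect in traefik.toml, plain http should answer with a 3xx and a https Location header
curl -I http://trae.xxx.com
# -k in case the presented certificate does not match the test hostname
curl -kI https://trae.xxx.com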

kubernetes-dashboard deployment

# performed on win on Ubuntu
# create the certificate used by kubernetes-dashboard
cat << EOF | tee /apps/work/k8s/cfssl/k8s/dashboard.json
{
  "CN": "dashboard",
  "hosts": [""], 
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
##### generate the kubernetes-dashboard certificate; an externally issued certificate can also be used
cfssl gencert \
        -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
        -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
        -config=/apps/work/k8s/cfssl/ca-config.json \
        -profile=kubernetes \
        /apps/work/k8s/cfssl/k8s/dashboard.json | \
        cfssljson -bare ./dashboard
# base64-encode the certificate and key (base64 is an encoding, not encryption)
cat dashboard.pem|base64 | tr -d '\n'
cat dashboard-key.pem|base64 | tr -d '\n'
# record the output for the secret below
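Instead of pasting the base64 strings by hand, kubectl can render the same secret (an alternative sketch; --dry-run -o yaml only prints the object without creating it):
kubectl -n kubernetes-dashboard create secret generic kubernetes-dashboard-certs \
  --from-file=dashboard.crt=dashboard.pem \
  --from-file=dashboard.key=dashboard-key.pem \
  --dry-run -o yaml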
# kubernetes-dashboard 
vi kubernetes-dashboard.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
data:
  dashboard.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBeFlFV0MxbGlqcnFzNW5vcHBxTXF0YzZSY0pnSWFJSGhGemZZUWhRQm5pK0Vjam8vCkRTUkYvY3BUOFlkTTg2MVpEV1lSN1FEelFLNmJUTmRLWXJJYmpVWHJpRFVFU01EUW13Y1VteTMzWjFpeXR6K0wKUUVmTVFvWVNReGVIY2RqUHp3bUhFS0todk9vNmxQTHNFWkMwQ3ZCamw2VHlERjhuSDEzby9kRlRVbGJhWUlGaQpPeGVIWkxMMTZKbmNLK3RVaW9ncjdLekFKMUkxTjdwOVQ1blZ5YU9PbWNCVEFnU3RJM0ZwSzdMZG1zaVU0ZEZ0CkpSSFZ0eTh6Y3dCSU9wWnhqV29mM2ROVkRrVUFsYjVtV2psU0RaQ2lhYmFYQi91NmJ0R0k3RlY2cENaUzdDVG4KeWlpUFlFSXRPSGRCT0VycGpKZWQ0bHQ5K2MvNDE3UTRIaiswdndJREFRQUJBb0lCQVFDK1daSWdjQTZRRnhScQpzVlNST1BNQjlFdXlJNlQrN0NZL2xXQUZGM2tUdHlKRVlTVEJpck0yVFprbjBFbjNGSndlVU1CNEZwRmJScTJBCm1vSWpxeHJveG5taGRjOWlPd3NTVHZtcU1kd2ZLNXBiQ0pBeDdNRE5ZS0FiTDRNbjAxazlaaVpaZnhTNG1WcksKa1hHNTRDZlYzeWR0VU5qRDJiVkFBdWQ2TVJQSDV5QWJTVktsMG9ONkRCaFV4MlYyWEo0WnRUVHE0b3R6VGYxZwp3SjNJeVFjSXl3czE2V3dkeHpuYStqVmpOYU5OQ3ZCT1BMbm9TeXZBQXZGRG9UYmUrMG1tcnZLVmlSeDBDT1FzCkUwNjFtNHY2eUExL3locndkT1BDYXN6SkpjWlYzOThJTzFKb2QxUHk3OU9aT1FpY1FEOGhwQmxqb0FSQ2JlY3QKRFFPcG5CR0JBb0dCQVBhYlJSSGpPTkxIQ25JZWlFQU1EYXNwQXo2RGxRNkQvdWNNdzROdkVPRVNVa3dvQ0p4cApwK1hJeVVzT1B1d2swTzVCcHJRcHZjdGYyWXlLZTFtR25iVUpmUVNWNGpLdWpqb0M0OWhOWk9lSE8zd0xMcnNXCkl1SU1Qeko0TjhxSzl0dUpDQ3BVYUZFVzRiN1R2OGsyK1pJWHJwN3hzNklDd01EUnpTaW9wY0hCQW9HQkFNMEgKQVl1bmdzY3hTM2JnZ05idU5sQ3lIOHBLZFVPbi95cU9IQUdYcG9vZmJUbXJiUUlWN0ZOVSszUTlYc2ErVVE0QwpUbVdFbzhabVhrL3lIV2FDVWxpRkN0ckRhTzNUZVhvb2pia1JyaDcxakFXN0pjVDRVZ1ZwcG1RakFVUW8vOWtVCmxHMUNpOTFZZy94dlV5dHlYM1BnZHJ6SnU2aWNsM1pVZ1h4dzNoWi9Bb0dBZENmY2w3bFVLWXZSTXNHSTRjb0wKb2lRMlAvclFlYjdZa05IbFFZSk9EQVdLT0E3ZlIzVkl2U1lmRWpoS2tRWWlWeWNiTTE4NTQ1SnBNUmFGVlR6ZwpDY2JIV1NLVUlkVXdic2l2czFGNUJza2V6cVdoeEVOLytNTlYvUnE5QkswQjY1UVhBWUV5aFlkbW0zQzN0RG90CndZOWdFOE83SGNONE1ScGhMUmFLeE1FQ2dZRUFoS2E5eHorUUM1VEhRSmlzZzJNSVhWbUIyLzRrdEt0akdvTnIKZDFSSStpQ3ZLSnJUSW9CUXNQSFE1em8xc2R5ODBKV0paNEZUL1MrS1lhdENmbXBmSU1xalpUcjlEcksrYTkwRgpKUEpkZDhaaTIrcGoyM2JXaW8zNmk5dGlIRmx5ZjE4alVUVzNESFVTb0NiZTVzTlBJc2ZkeXZPeXFMcjMvQ1ZjCnlaOU1jYjBDZ1lBMVp2RVM3bU42Nm10T2JpSlR3a3hhaTVvS2tHbDdHTDJkZXJFUmxsc1YrNWRCSVY4dG5DTnAKT2tjMFlMbHV2TEg4cG4zd2VCNzg5dUFCQjNXYmNKcHg0L2NIRm9oZDNhdlR0RThRVjJod0tNS2RKQVBvTHNoMgprK2lEUWd1dmFxSzNmL1RYUW43bWU3dWFqSDk3SXZldXJtWWsvVmRJY0dicnd1SVRzd0FEYWc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
  dashboard.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ5ekNDQXQrZ0F3SUJBZ0lVUWRIVXdKS1JYc1ZRb2VYS1JDTjd0eVcwWU04d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05ERXhNVE13TUZvWERUSTVNRGN3TVRFeE1UTXcKTUZvd2JURUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUkl3CkVBWURWUVFERXdsa1lYTm9ZbTloY21Rd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUIKQVFERmdSWUxXV0tPdXF6bWVpbW1veXExenBGd21BaG9nZUVYTjloQ0ZBR2VMNFJ5T2o4TkpFWDl5bFB4aDB6egpyVmtOWmhIdEFQTkFycHRNMTBwaXNodU5SZXVJTlFSSXdOQ2JCeFNiTGZkbldMSzNQNHRBUjh5Q2hoSkRGNGR4CjJNL1BDWWNRb3FHODZqcVU4dXdSa0xRSzhHT1hwUElNWHljZlhlajkwVk5TVnRwZ2dXSTdGNGRrc3ZYb21kd3IKNjFTS2lDdnNyTUFuVWpVM3VuMVBtZFhKbzQ2WndGTUNCSzBqY1drcnN0MmF5SlRoMFcwbEVkVzNMekp6QUVnNgpsbkdOYWgvZDAxVU9SUUNWdm1aYU9WSU5rS0pwdHBjSCs3cHUwWWpzVlhxa0psTHNKT2ZLS0k5Z1FpMDRkMEU0ClN1bU1sNTNpVzMzNXovalh0RGdlUDdTL0FnTUJBQUdqZ1kwd2dZb3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CMEcKQTFVZEpRUVdNQlFHQ0NzR0FRVUZCd01CQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjBHQTFVZApEZ1FXQkJURTl6cWx4dkErRXMrbE8zWlFEMlhubGFHRFpqQWZCZ05WSFNNRUdEQVdnQlJ4NEtjQVJjYWtSL2J4Cm13b1RCZURzK3hBb2FUQUxCZ05WSFJFRUJEQUNnZ0F3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUJnWHZwTEMKQjIybXlQaURlZnhsWGNZRzAvY0R2RXlYcTlENWtKTnBxKzFZQ0EvMlp2RDIyN1Q5VjY3aHVyTlA3T2FvSG95Tgo0MHpkR3lZTGRNV3pyZTQwVksxdC84N3pDTENzamt1ZXRCRWEwNVRqUTJhbDRhSzJ6TXl5MkJLWEpYbjlvdkhzCjJwNndvL001eklEOXl2OEhyRkZqWHM3NitTUTFzNXpOdUxuaDBET0Z1SktiZUZxSUJyNmZRbXlsb0l1VURtZjYKcGtQYkJyRnJpNHFGS0lDcVZKRCt3Z01zRFBiclVMZXF5NWlBVjNqRzJKMFgxOE4zdklCeUFwdWhZbjNudlV0TwpLREVIWkFJcFpjRWdqQ2ZLVDNyaERLL3JLN0VFZkxLcGlCdGJya3pFbjVWV3FQUFJEK3ZPU2VySldETDl1K0xyCmhEazlvZ084cmNqQzZGdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-tls-cert
  namespace: kubernetes-dashboard
type: Opaque
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUdYekNDQlVlZ0F3SUJBZ0lTQkdVcDlSaVAvK2lNMDVYM0FuY0FUeTg1TUEwR0NTcUdTSWIzRFFFQkN3VUEKTUVveEN6QUpCZ05WQkFZVEFsVlRNUll3RkFZRFZRUUtFdzFNWlhRbmN5QkZibU55ZVhCME1TTXdJUVlEVlFRRApFeHBNWlhRbmN5QkZibU55ZVhCMElFRjFkR2h3Y21sMGVTQllNekFlRncweE9UQTNNRGt3T1RJNU1ESmFGdzB4Ck9URXdNRGN3T1RJNU1ESmFNQll4RkRBU0JnTlZCQU1UQzIxa1pHZGhiV1V1WTI5dE1JSUNJakFOQmdrcWhraUcKOXcwQkFRRUZBQU9DQWc4QU1JSUNDZ0tDQWdFQW9mSVdOdTE4YUp1T3Jzd0JjZE9lODN0dWpXZ2dpUXl0VVYxQwpqNVhYbzNjQTM1L2ZxQXNGVHpJRGNwUmxhTGJ6SHd1d1psOWNSKzJuRENaUzI4VlhZaXcrSkQvQXpna3FzTHFJCjZ3YlFhcHNCa1lYUzRuT1UrZzhSMVgwcm52ckpickE1eHFJSWJKM002ajVLTXZ4RktvMEV3YXNBY2NiYlVGOW4KMHQ2RzNreG4zWW1Sek5HeHh2bXZ4V2prNWNkSWMza0MyT1VuRktGOG5XemJab2JiNk9PUnZSaElEWW5YdjkxdgoyMUYwQnZ0Q21GY0FEaDRqZXUrLzNKVDVLcEJkdkFHOHI3aU1wbkhKaFU1alhqTXlPRytMbkcvcnJuRzJGaXpHCmx1UHQwKzRlK0ZRSXFZY1BUM1cyTUF2ZDlzQTNEMThsUW82M00vZlMyYjNIYVNidFY0b1pmNS9zTzJNeEVPVnoKVEd1M0NxYk40TkcrZE8ycXoxYWxMQmlGZlVjNEdmUVpYRmlLaDFzazl3Qm5zeWhqYUZmdUx6bHRxMDg3STJLYQorVlRaUzFQSlJFbGduM3UwY1FmaENjelF5ZTJ3Vjl6RE9lVmUxeTBjLzZ0RWJhNllCeGR2ZGcwOFpKL0QwYTBLCnJvWlVJMW5Rc2RKeE8rQ3N1OURLYjROZzJCYnZkWVpHVWJrSCtSUDU0UUdrS1VnYnVxNVIwbXI0U1I2VUwrRE4KZjNxem81a3ZiMXVRWXFpaDZYUFVDVUVPOTNOU1Y2MTNUSUVOTUpyYjVhbGRLUkhPZlpWL201QThlUy9ibFFYcgpOV3FCRy9OL2RtckZjMmcyNGJEY3d5OXIzL3FkNy9MTWxmMVRVdzJGczR3M2x2VHJFanlwWEZhQ3BRRGxkc0xJCkYwcWVKVnNDQXdFQUFhT0NBbkV3Z2dKdE1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZEJnTlZIU1VFRmpBVUJnZ3IKQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3REFZRFZSMFRBUUgvQkFJd0FEQWRCZ05WSFE0RUZnUVVHUUNXOGNFbgpaNWhVWjBDa004QW03Wjh7NGJNd0h4WURWUjBqQkJnd0ZvQVVxRXBxWXdSOTNicm0wVG0zcGtWbDcvT283S0V3CmJ3WUlLd1lCQlFVSEFRRUVZekJoTUM0R0NDc0dBUVVGQnpBQmhpSm9kSFJ3T2k4dmIyTnpjQzVwYm5RdGVETXUKYkdWMGMyVnVZM0o1Y0hRdWIzSm5NQzhHQ0NzR0FRVUZCekFDaGlOb2RIUndPaTh3WTJWeWRDNXBiblF0ZURNdQpiR1YwYzJWdVkzSjVjSFF1YjNKbkx6QWxCZ05WSFJFRUhqQWNnZzBxTG0xa1pHZGhiV1V1WTI5dGdndHRaR1JuCllXMWxMbU52YlRCTUJnTlZIU0FFUlRCRE1BZ0dCbWVCREFFQ0FUQTNCZ3NyQmdFRUFZTGZFd0VCQVRBb01DWUcKQ0NzR0FRVUZCd0lCRmhwb2RIUndPaTh3WTNCekxteGxkSE5sYm1OeWVYQjBMbTl5WnpDQ0FRWUdDaXNHQVFRQgoxbmtDQkFJRWdmY0VnZlFBOGdCM0FPSnBTNjRtNk9sQUNlaUdHN1k3ZzlRKzUvNTBpUHVranlpVEFaM2Q4ZHYrCkFBQUJhOVpIamZBQUFBUURBRWd3UmdJaEFKNXBWaDFDSEpmcTFhd2NOYmxEU2FwL1prQmVBeXU5ajcrTVhISnMKTEI3TUFpRUFwM2xLVVNCZXpiQWpodkZWSTBGR3ZFWmtzU2lYKyt3SitiZ3VLOXlaS3JBQWR3QXBQRkdXVk1nNQpaYnFxVVB4WUI5UzNiNzlZZWlseTNLVEREUFRsUlVmMGVBQUFBV3ZXUjQzd0FBQUVBd0JJTUVZQ0lRRDI1L1NHClcrWHRDa2VzaHViekZtUnRnaDUrWXMxaXpnSG5CSmtOS1Z0cE9nSWhBT1lteWJCWjV3RjZBeE5UT29WdnkyYVMKNktEdURyWmRzSVYrN251WkhFSDdNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUNjRHFwTzF3OWdNbzJGaW1GTgpwSUlxT3d1N2hsUWVURU44enY1UmFiYWtGelpvZlhURXpRcGNtSlNWRUhET25MVGpjaWpITWxtbGdIbndTM2w0CjAyWFB0akIzUWJUNFRWUHlqUGpBZ1ZvL1ZmclNJT2N5S1pKRDNJMWxLNXV1anRCdGF3Rnh4cjBYeGd1Q2k5TlUKdlQ2R0RxYnlaVVdiL1I0bXVVYzFwRzMySVJiS3BxQnZveitsaGRMNHdOb1M5YXdiUlg3LzBmUytEZUZiZ09vbgpzYnBDYTFQeFdqWHYwNloxNkF0LzBRTlVZLzExdEw4bTRDK3Q2OW5kOUt6eUdRZmdOank2NmM1RmhIODVBQkNFClJ6L3NoVkdyb1lTQkh4M1Q0c0NKZnh5dW5oK0tVZ0dvRFk5VUc5RzI2T200eHgvWFQ5OTZONTNxUytPS21iY0wKajVJMgotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCgotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRWtqQ0NBM3FnQXdJQkFnSVFDZ0ZCUWdBQUFWT0ZjMm9MaGV5bkNEQU5CZ2txaGtpRzl3MEJBUXNGQURBLwpNU1F3SWdZRFZRUUtFeHRFYVdkcGRHRnNJRk5wWjI1aGRIVnlaU0JVY25WemRDQkRieTR4RnpBVkJnTlZCQU1UCkRrUlRWQ0JTYjI5MElFTkJJRmd6TUI0WERURTJNRE14TnpFMk5EQTBObG9YRFRJeE1ETXhOekUyTkRBME5sb3cKU2pFTE1Ba0dBMVVFQmhNQ1ZWTXhGakFVQmdOVkJBb1REVXhsZENkeklFVnVZM0o1Y0hReEl6QWhCZ05WQkFNVApHa3hsZENkeklFVnVZM0o1Y0hRZ1FYVjBhRzl5YVhSNUlGZ3pNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DCkFROEFNSUlCQ2dLQ0FRRUFuTk1NOEZybExrZTNjbDAzZzdO
b1l6RHExelVtR1NYaHZiNDE4WENTTDdlNFMwRUYKcTZtZU5RaFk3TEVxeEdpSEM2UGpkZVRtODZkaWNicDVnV0FmMTVHYW4vUFFlR2R4eUdrT2xaSFAvdWFaNldBOApTTXgreWsxM0VpU2RSeHRhNjduc0hqY0FISnlzZTZjRjZzNUs2NzFCNVRhWXVjdjliVHlXYU44aktrS1FESVowClo4aC9wWnE0VW1FVUV6OWw2WUtIeTl2NkRsYjJob256aFQrWGhxK3czQnJ2YXcyVkZuM0VLNkJsc3BrRU5uV0EKYTZ4Szh5dVFTWGd2b3BaUEtpQWxLUVRHZE1EUU1jMlBNVGlWRnJxb003aEQ4YkVmd3pCL29ua3hFejB0TnZqagovUEl6YXJrNU1jV3Z4STBOSFdRV002cjZoQ20yMUF2QTJIM0Rrd0lEQVFBQm80SUJmVENDQVhrd0VnWURWUjBUCkFRSC9CQWd3QmdFQi93SUJBREFPQmdOVkhROEJBZjhFQkFNQ0FZWXdmd1lJS3dZQkJRVUhBUUVFY3pCeE1ESUcKQ0NzR0FRVUZCekFCaGlab2RIUndPaTh3YVhOeVp5NTBjblZ6ZEdsa0xtOWpjM0F1YVdSbGJuUnlkWE4wTG1OdgpiVEE3QmdnckJnRUZCUWN3QW9ZdmFIUjBjRG92TDJGd2NITXVhV1JsYm5SeWRYTjBMbU52YlM5eWIyOTBjeTlrCmMzUnliMjkwWTJGNE15NXdOMk13SHdZRFZSMGpCQmd3Rm9BVXhLZXhwSHNzY2ZyYjRVdVFkZi9FRldDRmlSQXcKVkFZRFZSMGdCRTB3U3pBSUJnWm5nUXdCQWdFd1B3WUxLd1lCQkFHQzN4TUJBUUV3TURBdUJnZ3JCZ0VGQlFjQwpBUllpYUhSMGNEb3ZMMk53Y3k1eWIyOTBMWGd4TG14bGRITmxibU55ZVhCMExtOXlaekE4QmdOVkhSOEVOVEF6Ck1ER2dMNkF0aGl0b2RIUndPaTh3WTNKc0xtbGtaVzUwY25WemRDNWpiMjB2UkZOVVVrOVBWRU5CV0RORFVrd3UKWTNKc01CMEdBMVVkRGdRV0JCU29TbXBqQkgzZHV1YlJPYmVtUldYdjg2anNvVEFOQmdrcWhraUc5dzBCQVFzRgpBQU9DQVFFQTNUUFhFZk5qV0RqZEdCWDdDVlcrZGxhNWNFaWxhVWNuZThJa0NKTHhXaDlLRWlrM0pIUlJIR0pvCnVNMlZjR2ZsOTZTOFRpaFJ6WnZvcm9lZDZ0aTZXcUVCbXR6dzNXb2RhdGcrVnlPZXBoNEVZcHIvMXdYS3R4OC8Kd0FwSXZKU3d0bVZpNE1GVTVhTXFyU0RFNmVhNzNNajJ0Y015bzVqTWQ2am1lV1VISzhzby9qb1dVb0hPVWd3dQpYNFBvMVFZeiszZHN6a0RxTXA0ZmtseEJ3WFJzVzEwS1h7UE1UWitzT1BBdmV5eGluZG1qa1c4bEd5K1FzUmxHClBmWitHNlo2aDdtamVtMFkraVdsa1ljVjRQSVdMMWl3Qmk4c2FDYkdTNWpOMnA4TStYK1E3VU5LRWtST2IzTjYKS09xa3FtNTdUSDJIM2VESkFrU25oNi9ETkZ1MFFnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ==
  tls.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS0FJQkFBS0NBZ0VBb2ZJV051MThhSnVPcnN3QmNkT2U4M3R1aldnZ2lReXRVVjFDajVYWG8zY0EzNS9mCnFBc0ZUeklEY3BSbGFMYnpId3V3Wmw5Y1IrMm5EQ1pTMjhWWFlpdytKRC9BemdrcXNMcUk2d2JRYXBzQmtZWFMKNG5PVStnOFIxWDBybnZySmJyQTV4cUlJYkozTTZqNUtNdnhGS28wRXdhc0FjY2JiVUY5bjB0Nkcza3huM1ltUgp6Tkd4eHVtdnhXams1Y2RJYzNrQzJPVW5GS0Y4bld6YlpvYmI2T09SdlJoSURZblh3OTF2MjFGMEJ2dENtRmNBCkRoNGpldSsvM0pUNUtwQmR2QUc4cjdpTXBuSEpoVTVqWGpNeU9HK0xuRy9ycm5HMkZpekdsdVB0MCs0ZStGUUkKcVljUFQzVzJNQXZkOXNBM0QxOGxRbzYzTS9mUzJiM0hhU2J0VjRvWmY1L3NPMk14RU9WelRHdTNDcWJONE5HKwpkTzJxejFhbExCaUZmVWM0R2ZRWlhGaUtoMXNrOXdCbnN5aGphRmZ1THpsdHEwODdJMkthK1ZUWlMxUEpSRWxnCm4zdTBjUWZoQ2N6UXllMndWOXpET2VWZTF5MGMvNnRFYmE2WUJ4ZHZkZzA4WkovRDBhMEtyb1pVSTFuUXNkSngKTytDc3U5REtiNE5nMkJidmRZWkdVYmtIK1JQNTRRR2tLVWdidXE1UjBtcjRTUjZVTCtETmYzcXpvNWt2YjF1UQpZcWloNlhQVUNVRU85M05TVjYxM1RJRU5NSnJiNWFsZEtSSE9mWlYvbTVBOGVTL2JsUVhyTldxQkcvTi9kbXJGCmMyZzI0YkRjd3k5cjMvcWQ3L0xNbGYxVFV3MkZzNHczbHZUckVqeXBYRmFDcFFEbGRzTElGMHFlSlZzQ0F3RUEKQVFLQ0FnQXY5Zk13UnpzTisrdlF4cWd5M3JwM1gzbkpOU3BWakVTVUVTdVNQSTFGWXd3R0xtSGRjWTRiK3pMYwpMeWl0VDJsSEszNE5nM1pmOHZrQzl5S1k1YVBRZGt2ZERtaDZYR3FoTmswd1ZhOUpzeWhPd2JSSHpuVXpiVjBaCnZkMDZVd2x1MTQvMHpLMzBCUFBYOTZTZjN1aFpCclIrNnJiUisxT2VSUE1KbDArWDdFYmliRWlhd1F1R1hsVHAKQVB5eE5FaTNzZ0h2M0VhcnJIdXNYNzNHYW5BY1U3RW9zRlUrZFRGSktEcGxXSVVsUUNwajFYZzF0aVZKMWxFYQo4Wit0UkY0T1BQRjFsUkZLaGU1cHBXSjJWbkVzRjVUZ09xRXc0NHBLbk80Zlo5ZGFhVzRRbTBxSmNtOU5XQTRoCndwSDA3czRmcGt6eG5qU1JsbmFDZDlyandGeVBsSkJzUXNhVlFFNzlpQzJZMTRnTk9KQ0xyMXRKSEQ2ODN3bW4KS3ZNOHZpOTdHTmIybXZHeWNtZnloNVpzTFBpTWNqOFFER3VWZU53dlNESXpybnhqVkZlc0liTWt5UlZRem9IVApTTHRQbXdVR3lwRHVrMDhaZytsT0lYOC85K3lqMER3MDRqenllTVptYlFVdkd2N2lNWjFUaHdaRHF1YkJXV3J4CmtYTmJwTG9BMGxrcHh5bjdGam9Ya20zM2ZKQURjd2xWSS82WFNrSm1FaFVlZmZnaFFSMGNyVGphQVd1Qkx2Qk0KT0s5aEEzT3RTN2F0S2FDb1lvSmRrYkpHQTdWdytNNzA4NEJOTGhxM1Fyckg4S3M3Z05pdC9NN3lxSnU1alBaZgo2SE1seHNyWU9NVUhuVlk4VDkwN0Q3cS9ORUNnRThzODhnZzAyQ3JNWTFqanE4UnBpUUtDQVFFQTE2UHJaMUEwClNISS83akdmS3BETkJzQ0xrVUFxRERKSzQ0dFdJYmJBUXFhRTN1eDh4bkFlU2NjSHozbS9ScEpPSGtteHZTZlgKbTJ1Wk8veGtNTWhYK2lwOHdFOHZibzR1enVNYitTSXE3bWpialJkK1JJczJ5NHJsZVQ2NGVjRWc4R2pZckExZgpiSEI0MmhQclVTcXpxUVIwOTZocm1Lb1diU0RDZDZwOUVNeWVzT3IwTjdtQmJYVVZPazJxZGtYRlZWbHBlUDdpClFxWGdRUUI0bHgzLzJJdlpBMlhJUXlQdGJ0RWVRbmgyQ3FNM2NDMzR0VEVjZ244K0VwNG9SWmkwTTBHaUY3bXgKOTEvZHY2THZlNTR5K1pON1lXd1NFQ09ubzd5bDlvTlBZVnVGMGRiMjh0elppMThCeHJTQ2JESE1XbExvUzhWNgpXTEo0OGlSODJDYkc1d0tDQVFFQXdFRjM4KzYyeDhDU2x0blZZNlJaN0J0NEdiNEJqVWhWYXZ0NFkxUGFlbXFNCjFidFVnR2JyUnBoNHFUSEFTckUwUUZLeVZKYnlCUkJyRHIxWHU4WWRSVXQzZC92VzlIR1dPd1BKdTN2M3pLbHMKQ2xsZnpFY3J5L1l2aHAzSzlEcGR6OE1icHdueW5xcGV6b0xMNlJpL3JnK0hyTzBueXd1RSt0T2xYVFo2eUtadApHWVdTSVBWaG00NUJkc2ZxUzhnYjVvbjA0bHh4bnhxVnJvN0c0TUR6cmVEYlFhaGdyS3VuRWxwajZ4eW1PVWpBCkdCZDR3QUVrUExxNUUrRWcreDY4TkRLVTYwK29ybFhLWVhDQm5HSFZOQ3BVcmswVXkrcHFZZmFEN3VuR2VzaHMKSEwra3lXbXl5a3ErTmNKbnRXMFNSNy9sU1IvZUFhVEZyVzZVaXV0RGJRS0NBUUVBemhRYU9PNmVPSW51N016QgpScVdCT3EyeDg4cjFKQmpBRnZzbkFpc3JTOGJsZmtGVTdXREdvVTB5K3FWb0ZhSm1RMjI4RFlCUS9YZnp4aTdxCjlPL1JuQU1VbTVoUlJQOWVYbHNPZGFXZ2o1em9ETXRoNFZHRnVUbHhHZERGN1oyU3hBMysyMVlnVm5xYUZCY3IKTUxOMVpOWWNqajJITGl1R0tSNUFtcW4wd2FRN0YrcENJQ3NKTkxqSzQ2QXJnc0lrMXU4TzdCSHgyeTI0eFlZVQp1SjV6emRmQU9nNEFONkhURzY5L2twaWFmb29DeGhNNDlyZ0xmZTdxUEZLbk8vTzJhckdUbmNiWi9BWEMzb3h4Ci81dHRMYlF6R2lSMGtyWHdWSHRKdys4elltQmIzL0RtcWF4RHZueTZMdEo5UGJiTmk1aGw1VnZCRTVqa0dzeWgKL3RQNEN3S0NBUUJ2R1dZb0lKcWZkRGxCMHovdEJOeXlCRzJ5OG9vVEN1blJtT0JKQmZ3TEllZWcyMUJKb3kveQo2OGxPZk9HU1NEVFp0dkEyMGNPcUNZTFVVYmFSWERzdUFCNVp4NzdBSTZPZEZ1Tk01S2FlTG9td3NWVWF4MFlYCjUzd3ZYcUFaNG1DejN4dnJ1MlBwTEtyOHk3
anFTdEw1MHgra1hxZlFQaWZxaXNQVXlkYktmT0l2RFhFVWVyaWQKRytmWXJFNUkzS3JDM3BZVStUWmJ1eEVrZm4yUEEvSE5XVk5hN2VKdjVnSDJLU1gwaCtuRzBMT3hPRjhmRlluTApUbHdGa09OdU9xU254Vk1wYUM4aUQ1R1VIVi9JN3dBMTFRQjZlVEM3Wmd0ejhQRHM3MHN6U1A2dzNrNXIxaGpyCnJhV2RpMnBDL1hUQzRiR3VRQ3dhNXcwVTNBSWJCVGxCQW9JQkFEc1RONGhvclVHNWw3MXhLZk5ibVBTbDZ6RlIKYTJ4d2U2VVZPOVZzMFpHeEdLWWJSN1VuVDBDL1FqUiswS2JsbE9leDdFY3cyMklCcmFFVzBGbXpuVnoyUW9FNwpMUE5COXhyTTFEeE56UjZEbFBUeERMcEFGWVlUcm40SWY1cjFVdVdpc2lMdmd6T2xGTlVITnN5UFJIZWNGblhUCnNhTk9JWkgrQTJ5KzF3QWdpSFZIS2JPRGRHeVFQVlQ0TXFFWkJaY2pQcmRBekNKcnloSHlYdHBqRjFSdlFEYTMKTVM3U3JVTGM4djJGQWJ1VG1QZ2R1ZHBKd1Q4dENCa2VRKzZ4YmJWN3YrZzBEMG5EWFNIZFVwNXFyUzcrTnhtVwp4NWV4UHo1VENhYXcxSnkzWjRmT1MzMTV6eHJGdmRHTmhWRXhMMzRlUVlzOHRYN0N0VWxuWkNray9zYz0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0=
---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta4
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            - --token-ttl=43200
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/frontend-entry-points: http,https
    traefik.ingress.kubernetes.io/redirect-entry-point: https
spec:
  rules:
  - host: csdd.xxxx.com
    http:
      paths:
        - backend:
            serviceName: kubernetes-dashboard
            servicePort: 443
  tls:
   - secretName: dashboard-tls-cert
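Apply the manifest (file name from the vi step above) before creating the login token:
# create the kubernetes-dashboard resources
kubectl apply -f kubernetes-dashboard.yaml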
# create a token for kubernetes-dashboard login
# create the service account
kubectl create sa dashboard-admin -n kube-system
# grant the service account cluster-admin access
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# get the token secret name
ADMIN_SECRET=$(kubectl get secrets -n kube-system | grep dashboard-admin | awk '{print $1}')
# get the token value used for dashboard.kubeconfig
DASHBOARD_LOGIN_TOKEN=$(kubectl describe secret -n kube-system ${ADMIN_SECRET} | grep -E '^token' | awk '{print $2}')
echo ${DASHBOARD_LOGIN_TOKEN}
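An equivalent one-liner reads the token straight from the secret data (same result as the describe/grep pipeline above):
kubectl -n kube-system get secret ${ADMIN_SECRET} -o jsonpath='{.data.token}' | base64 -d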
# set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=dashboard.kubeconfig

# set client credentials, using the token created above
kubectl config set-credentials dashboard_user \
  --token=${DASHBOARD_LOGIN_TOKEN} \
  --kubeconfig=dashboard.kubeconfig

# set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=dashboard_user \
  --kubeconfig=dashboard.kubeconfig

# set the default context
kubectl config use-context default --kubeconfig=dashboard.kubeconfig
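To sanity-check the kubeconfig (the token is bound to cluster-admin, so a simple query should succeed):
kubectl --kubeconfig=dashboard.kubeconfig get nodes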
# add a hosts entry, then open:
https://csdd.xxxx.com/#/overview?namespace=default
# kubernetes-dashboard needs metrics to display CPU/memory usage, so metrics-server must be deployed

metrics-server deployment

# performed on win on Ubuntu
# create the metrics-server certificate
cat << EOF | tee /apps/work/k8s/cfssl/k8s/metrics-server.json
{
  "CN": "metrics-server",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "GuangDong",
      "L": "GuangZhou",
      "O": "cluster",
      "OU": "cluster"
    }
  ]
}
EOF
### generate the certificate
cfssl gencert -ca=/apps/work/k8s/cfssl/pki/k8s/k8s-ca.pem -ca-key=/apps/work/k8s/cfssl/pki/k8s/k8s-ca-key.pem \
    -config=/apps/work/k8s/cfssl/ca-config.json \
    -profile=kubernetes /apps/work/k8s/cfssl/k8s/metrics-server.json | cfssljson -bare ./metrics-server
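Optionally inspect the generated certificate before encoding it (assumes openssl is installed):
openssl x509 -in metrics-server.pem -noout -subject -issuer -dates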
# create metrics-server-secrets.yaml
# base64-encode the certificate and key
cat metrics-server.pem|base64 | tr -d '\n'
cat metrics-server-key.pem|base64 | tr -d '\n'
vi metrics-server-secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-certs
  namespace: kube-system
type: Opaque
data:
  metrics-server.pem: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQ3VENDQXRXZ0F3SUJBZ0lVYkloOTQ3Z3NET2gxdVplVnBoMU9GVmhYeHA0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd2JqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUk13CkVRWURWUVFERXdwcmRXSmxjbTVsZEdWek1CNFhEVEU1TURjd05UQXpOVGd3TUZvWERUSTVNRGN3TWpBek5UZ3cKTUZvd2NqRUxNQWtHQTFVRUJoTUNRMDR4RWpBUUJnTlZCQWdUQ1VkMVlXNW5SRzl1WnpFU01CQUdBMVVFQnhNSgpSM1ZoYm1kYWFHOTFNUkF3RGdZRFZRUUtFd2R0WkdSbllXMWxNUkF3RGdZRFZRUUxFd2R0WkdSbllXMWxNUmN3CkZRWURWUVFERXc1dFpYUnlhV056TFhObGNuWmxjakNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0MKQVFvQ2dnRUJBTUlvdHhsakpLcCtyb1hGcGJJWXRuNW1mVXVyREM1bUlkb2Z0RTNSVGhNU1pPSCt0aVVoMDdQRQpnb0xPOG1lSkxaS21ob1BUYzhJWTJYVjdiZzJWWFpRVUd6MFJuMExmNWdWam5UN29yMFFnZzErYnJnZU5wWUtpCjVoNm5ISVE3ZGlKYk10cUFndm16RGR6bWdoUXd2SHBKTzh4bEQwKzRwT0VHT2VtQkNPU3BsaFhrenR3UWQ3ZHYKY2x1QUljQUdiUGF6dzI4VkJJU2F4bCtrZnZwNzIyeEkvVy9DL3pRS1JnN053UG9IaVpFWm9QcGxPY001cGpvUwpJeEdnWVZEYjB6OGlqZWR3RjZmcE9RZkFOcitvQnVONnZnMXAzd2Jud2tKTWtEQUV2MzBXZG1BUzB5STJicC9RCkJZYjU2VWxGTXI4anNoWHJ5dlVsZ3F3S0hscFh0WkVDQXdFQUFhTi9NSDB3RGdZRFZSMFBBUUgvQkFRREFnV2cKTUIwR0ExVWRKUVFXTUJRR0NDc0dBUVVGQndNQkJnZ3JCZ0VGQlFjREFqQU1CZ05WSFJNQkFmOEVBakFBTUIwRwpBMVVkRGdRV0JCVEp6cVJBMWdIN***d3B3TG01ZEtWUHFvdG43VEFmQmdOVkhTTUVHREFXZ0JSeDRLY0FSY2FrClIvYnhtd29UQmVEcyt4QW9hVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBcTgyS0JRMWJBM1pjeG9OZGFJVVUKNjVvSkZhM3paM2k5SDk1QWZvU0dXQWQvbFU3L3B2Qkt2K0EwbS9qQ0tMS1FpQ0xXRGtHYzFUNS8xTzhPYTg0YgpDV3I5M3pOWldRb2N6UWJEaHFCRnZNUVp0azRYVVkrMjR3Yjd3cmJCRHc2QVY2R0l2bVMrYm91eFRVd29sbmRMCk5FS2EvcHNvQUtaRUFJZkJUaCtMNVpMQ09GOXFUWEMyOGtnN1czak4vMzBiYlk5UE5ObVpLcGNaNEpEVjA5aGYKU3RaTjZuOVFXK3ZDcFFoZXVISWVORlR2RnQ5bGtSMVBFYUtHUjFiWEdyeUNHOHNTeXVDc0xER1lnVlhmYVZtYgp3dTlnSG1JS2E2aDZWVmVIWitMbVFmZmxqcEdPRStKV1l1TWRPamtHYUYxdEtLUWZFelZGN3BxT0VTQXkrV3hpCnVBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
  metrics-server-key.pem: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBd2lpM0dXTWtxbjZ1aGNXbHNoaTJmbVo5UzZzTUxtWWgyaCswVGRGT0V4Sms0ZjYyCkpTSFRzOFNDZ3M3eVo0a3RrcWFHZzlOendoalpkWHR1RFpWZGxCUWJQUkdmUXQvbUJXT2RQdWl2UkNDRFg1dXUKQjQybGdxTG1IcWNjaER0Mklsc3kyb0NDK2JNTjNPYUNGREM4ZWtrN3pDVVBUN2lrNFFZNTZZRUk1S21XRmVUTwozQkIzdDI5eVc0QWh4QVpzOXJQRGJ4VUVoSnJHWDZSKytudmJiRWo5YjhML05BcEdEczNBK2dlSmtSbWcrbVU1Cnd6bW1PaElqRWFCaFVOdlRQeUtONTNBWHArazVCOEEydjZnRzQzcStEV25mQnVmQ1FreVFNQVMvZlJaMllCTFQKSWpadW45QUZodm5wU1VVeXZ5T3lGZXZLOVNXQ3JBb2VXbGUxa1FJREFRQUJBb0lCQUQzaEtoSGVSalpNYjZUVQp6QzFKc1FmeDlDYmttZHBEMUxBbkFKajRoekNPNFRZaHJyMkEzVzdpeDFHWFVTeHNUT2o3UjgzRjI1UFZ4YUNxCnVQVjlVRGk4ZTczbjJ1RSthSm41R0ltUE1TUytWQUJwcG5wank0Y3FFYnRkT1RwdmxRUDRHdW9Hb1RlaHVGNVoKM01WQWp5Rk9LOCt4VkFMdGJ5Y0VpL3ArbEc0RGkvOThIcUlDQngwSlhCUnJoV05lWUdZL0c3eGNWT2pCNUl5SQpPNVpoZ1I0Sk9yODloNVZ1RHdIY3E2UVlLQ2sxTktQZzc0Y3BOY2J5ZVZEM0FhYVRHd25QU3BMd0hGaElzTGpNCkllaEJqZzkrZDdyRU8xMHU0azhKWW5qYUdNMzRMM0RlTVVrck95NUMzRjY3RFNwaTJVZUhjOVh7YzViVUFwb0gKTE1zRUxuMENnWUVBenV3STRSN0FrYmZ2VnVFR0hOa0FFNjgzdTJoWk1iUFdwSHVVTGQ5UzJnUzYzODVWUFdJSApiQXp6WGZuTjh2U2dHNVVQdzUxa1VLZVppY0pORTVBWWtOVDRwMWxoTFdKSkwxSWRSdEV3VU5oblVLNlczRWlMCmJLeDhhalk3dkZDV0ZKUmRTUHJYLzViTWU4TVBCWWNTT0FkZEErZFhlaGd0K2x0WEU3LzE1cGNDZ1lFQThEVzcKOEIyZGNYLzZqMVF4UkhEc3ZGUDYxaWxTREJhMEtKUGlobnpSZWVwVHRFc0hvRitBODk2SXRDZFFMSTJZcEZyRApBU1dSSU1VQVVzVE1uMStvZFprOGovd21QRkxzUmRpSVJWZC80ZHdCTmlDNHJxdnkwQTFxUVNJUXF1MC9CcFV2ClRpMjhZeURRdHh0Wmg1d0NDQUx2a0Rqb2N4cXJzbHEwRDViTnNoY0NnWUFFNjB1U3ZuNXBlR3Z5THN0TWFFUSsKTTQ0cG9HTnVjUTRMUHZ6WTBsUlA1b2ppeFM3MWxKMEVqcEpDaTZ1L3VIdXQ3QzlCd1Q0ZlJMeTdyNmpSYkQySgpRK2JkWTV5UnphSmJ3Nkg2aXdLUkNYUDdVUXM1Rldockh3YWVOOGZYeERxdEpwSEpLRjEyTUFtUWI2U3R4dlpCCjZycmxXdHlUaEh2alZnU043YVJVNVFLQmdHbVFJN3lkTnpERy9sVDR1Z0lLNG03Tk5VSGl2TlRsTVYxWHlFZzAKR0ZiTW5PWnh5ck02NVUvRzd5ckUwQjRVU0EyS2VZSktnU0gya1hMT1crSjZSbTBQMzZhak9DWndocmNYTnFQSwpsVCtyMExoNTNzK2NiMFB4Y1UyWWE5ekNFRjJUT0V2U0c2VXdxYWllazFUZVFhSkZzQVFnamo3dmJKOGY3MXVlCmVWMFhBb0dCQUlQWDN1OTJtU3o5clpqOTN3NFNKbUk2K0tkY3BtTGg0TE5IcUdBOVJSNi80T0NWWDBHNHVtZ1YKMkxUOU1MY05CdUVFZE5nTHRHeGIzdTI5cUlLZHZpcDhBZzlTbHQvMlBIcnlsLzIzWWU0bURTWDVUOXNrVXhaRgpjVGxvN3QxSnRZN3NaRU00Vng4cWxQbDcwWXMvSWRuWmhWaFU2d1F2ZGp0TGk1UlU4L2ttCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
# resource-reader.yaml
vi resource-reader.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# metrics-server-service.yaml
vi metrics-server-service.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
# metrics-apiservice.yaml
vi metrics-apiservice.yaml
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100

# auth-reader.yaml
vi auth-reader.yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system

#  auth-delegator.yaml
vi auth-delegator.yaml
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
# aggregated-metrics-reader.yaml
vi aggregated-metrics-reader.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]

# metrics-server-deployment.yaml
vi  metrics-server-deployment.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      tolerations:
        - effect: NoSchedule
          key: node.kubernetes.io/unschedulable
          operator: Exists
        - key: NoSchedule
          operator: Exists
          effect: NoSchedule
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      - name: metrics-server-certs
        secret:
          secretName: metrics-server-certs
      containers:
      - name: metrics-server
        image: juestnow/metrics-server-amd64:v0.3.3
        imagePullPolicy: Always
        command:
        - /metrics-server
        - --tls-cert-file=/certs/metrics-server.pem
        - --tls-private-key-file=/certs/metrics-server-key.pem
        - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
        - --kubelet-insecure-tls
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        - name: metrics-server-certs
          mountPath: /certs
# create the metrics-server resources
kubectl apply -f .
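Before querying metrics, confirm the deployment is up and the aggregated API is registered (the APIService should report Available=True):
kubectl -n kube-system rollout status deployment/metrics-server
kubectl get apiservice v1beta1.metrics.k8s.io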
# verify metrics-server
kubectl top node
[root@]~]#kubectl top node
NAME     CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
master   177m         7%     2057Mi          35%
[root@]~]#kubectl top pods -A
NAMESPACE              NAME                                        CPU(cores)   MEMORY(bytes)
clusterstorage         nfs-client-provisioner-5f6bc44cd7-fjr7f     3m           15Mi
kube-system            coredns-597b77445b-fhxvr                    4m           23Mi
kube-system            kube-router-5tmgw                           9m           16Mi
kube-system            metrics-server-66d78c47-zn679               1m           14Mi
kube-system            traefik-578574dfdb-dzl22                    6m           41Mi
kubernetes-dashboard   dashboard-metrics-scraper-fb986f88d-rc6zs   1m           25Mi
kubernetes-dashboard   kubernetes-dashboard-668c4f84bc-w6vw6       2m           40Mi

# CPU and memory values are returned correctly
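The same data can also be read straight from the aggregated API, which helps when kubectl top misbehaves (a sketch using kubectl's raw mode):
kubectl get --raw "/apis/metrics.k8s.io/v1beta1/nodes"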
