Architecture:
| Cluster Role | Hostname | IP |
| --- | --- | --- |
| Master | master-1 | 192.168.20.44 |
| Master | master-2 | 192.168.20.45 |
| Master | master-3 | 192.168.20.46 |
| Node | k8s-node-1 | 192.168.20.47 |
| Node | k8s-node-2 | 192.168.20.48 |
| Node | k8s-node-3 | 192.168.20.49 |
After installing CentOS 7, do the following on every host:
yum update
When setting kernel parameters, make sure to apply the following:
# Kernel parameters for the HA Master nodes (note net.ipv4.ip_nonlocal_bind, required to bind the VIP)
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_local_port_range = 10000 65000
fs.file-max = 2000000
vm.swappiness = 0
EOF
# Kernel parameters for the remaining Master nodes and the worker nodes
cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_local_port_range = 10000 65000
fs.file-max = 2000000
vm.swappiness = 0
EOF
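The parameters take effect only after they are loaded. One caveat: the net.bridge.* keys exist only while the br_netfilter kernel module is loaded, so load it first (add it under /etc/modules-load.d/ if you want the module to persist across reboots):
modprobe br_netfilter
sysctl --system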
Kubernetes 1.14 is validated against Docker versions 1.13.1, 17.03, 17.06, 17.09, 18.06 and 18.09, so Docker 18.09 is used here throughout.
Download the docker-ce-18.09 rpm package together with the matching docker-ce.repo file from the Aliyun mirror, and install it directly on all node machines:
mv docker-ce.repo /etc/yum.repos.d/
yum install docker-ce-18.09.5-3.el7.x86_64.rpm -y
Start docker on all node machines and enable it at boot:
systemctl start docker
systemctl enable docker
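A quick sanity check that the expected engine version is the one actually running:
docker version --format '{{.Server.Version}}'
The command should print 18.09.5.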
Run the following on all hosts to create the required directories:
mkdir -p /opt/kubernetes/{cfg,bin,ssl,log}
Download the Kubernetes 1.14 binary packages from GitHub: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.14.md#server-binaries
The following packages are needed:
[root@master-1 tmp]# ll
total 537520
-rw-r--r-- 1 root root 113938518 Jul 24 19:15 kubernetes-node-linux-amd64.tar.gz
-rw-r--r-- 1 root root 433740362 Jul 24 19:09 kubernetes-server-linux-amd64.tar.gz
Extract:
tar xf kubernetes-server-linux-amd64.tar.gz
Download etcd v3.3.12 and flannel v0.11.0 from GitHub:
wget https://github.com/etcd-io/etcd/releases/download/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
Kubernetes supports several ways of generating certificates; any one of easyrsa, openssl or cfssl will do.
Here cfssl is used to create the CA certificate.
cfssl must be installed separately:
[root@master-1 ~]# cd /usr/local/src/
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /opt/kubernetes/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /opt/kubernetes/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /opt/kubernetes/bin/cfssl-certinfo
chmod +x /opt/kubernetes/bin/*
On all nodes, add the Kubernetes bin directory to the system PATH:
echo 'PATH=$PATH:/opt/kubernetes/bin' >>/etc/profile
source /etc/profile
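A quick check that the cfssl tools are now on the PATH and executable:
cfssl version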
[root@master-1 ~]# cd /opt/kubernetes/ssl/
[root@master-1 ssl]# cfssl print-defaults config > config.json
[root@master-1 ssl]# cfssl print-defaults csr > csr.json
[root@master-1 ssl]# ll
total 8
-rw-r--r-- 1 root root 567 Jul 26 00:05 config.json
-rw-r--r-- 1 root root 287 Jul 26 00:05 csr.json
[root@master-1 ssl]# mv config.json ca-config.json
[root@master-1 ssl]# mv csr.json ca-csr.json
[root@master-1 ssl]# vim ca-config.json
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
The ca-csr.json file:
[root@master-1 ssl]# vim ca-csr.json
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
[root@master-1 ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca
2018/07/26 00:27:00 [INFO] generating a new CA key and certificate from CSR
2018/07/26 00:27:00 [INFO] generate received request
2018/07/26 00:27:00 [INFO] received CSR
2018/07/26 00:27:00 [INFO] generating key: rsa-2048
2018/07/26 00:27:01 [INFO] encoded CSR
2018/07/26 00:27:01 [INFO] signed certificate with serial number 479065525331838190845576195908271097044538206777
[root@master-1 ssl]# ll
total 20
-rw-r--r-- 1 root root 386 Jul 26 00:16 ca-config.json
-rw-r--r-- 1 root root 1001 Jul 26 00:27 ca.csr
-rw-r--r-- 1 root root 255 Jul 26 00:20 ca-csr.json
-rw------- 1 root root 1679 Jul 26 00:27 ca-key.pem
-rw-r--r-- 1 root root 1359 Jul 26 00:27 ca.pem
[root@master-1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.20.45:/opt/kubernetes/ssl
[root@master-1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.20.46:/opt/kubernetes/ssl
[root@master-1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.20.47:/opt/kubernetes/ssl
[root@master-1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.20.48:/opt/kubernetes/ssl
[root@master-1 ssl]# scp ca.csr ca.pem ca-key.pem ca-config.json 192.168.20.49:/opt/kubernetes/ssl
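Since the same four files go to every remaining host, an equivalent shortcut is a small loop (bash brace expansion; same files and destinations as above):
for ip in 192.168.20.{45..49}; do scp ca.csr ca.pem ca-key.pem ca-config.json $ip:/opt/kubernetes/ssl/; done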
Haproxy and keepalived are deployed on two of the Master nodes here; keepalived needs a script that monitors the haproxy process.
yum install keepalived -y
Configuration on the first HA node:
# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
vrrp_script check_haproxy {
script "/etc/keepalived/check_haproxy.sh"
interval 3
weight -20
}
vrrp_instance K8S {
state backup
interface eth0
virtual_router_id 44
priority 200
advert_int 5
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.20.50
192.168.20.60
}
track_script {
check_haproxy
}
}
Configuration on the second HA node (identical apart from the lower priority):
! Configuration File for keepalived
vrrp_script check_haproxy {
script "/etc/keepalived/check_haproxy.sh"
interval 3
weight -20
}
vrrp_instance K8S {
state backup
interface eth0
virtual_router_id 44
priority 190
advert_int 5
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.20.50
192.168.20.60
}
track_script {
check_haproxy
}
}
Create the health-check script on both nodes:
vim /etc/keepalived/check_haproxy.sh
#!/bin/bash
active_status=`netstat -lntp|grep haproxy|wc -l`
if [ $active_status -gt 0 ]; then
exit 0
else
exit 1
fi
chmod +x /etc/keepalived/check_haproxy.sh
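The walkthrough does not show starting keepalived itself; on both HA nodes, once the configuration and script are in place:
systemctl start keepalived
systemctl enable keepalived
ip addr show eth0
The last command should show the two virtual addresses on the priority-200 node. (Until haproxy is running, check_haproxy.sh fails and lowers the effective priority on both nodes equally, which is harmless at this point.)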
Next install haproxy on the same two nodes (see the official haproxy configuration manual for the full parameter reference). Because haproxy must bind the VIP, make sure these kernel parameters are set:
echo 'net.ipv4.ip_nonlocal_bind = 1'>>/etc/sysctl.conf
echo 'net.ipv4.ip_forward = 1'>>/etc/sysctl.conf
sysctl -p
yum install haproxy -y
# cat /etc/haproxy/haproxy.cfg |egrep -v "^#"
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
defaults
mode tcp # changed from the default http to layer-4 (TCP) proxying
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
frontend main 192.168.20.50:6443
default_backend k8s-node
backend k8s-node
mode tcp # TCP here as well
balance roundrobin
server master-1 192.168.20.44:6443 check # the three master nodes
server master-2 192.168.20.45:6443 check
server master-3 192.168.20.46:6443 check
After both nodes are configured, start haproxy and keepalived and verify that the VIP fails over automatically.
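For example, a minimal failover check (interface name eth0 as in the keepalived configuration above):
# on both HA nodes
systemctl start haproxy && systemctl enable haproxy
# find the node currently holding the VIP
ip addr show eth0 | grep 192.168.20.50
# on that node, stop haproxy; the check script should shift the VIP to the peer
systemctl stop haproxy
# confirm on the other node, then restore
ip addr show eth0 | grep 192.168.20.50
systemctl start haproxy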
Run the following commands to install etcd:
[root@master-1 ~]# cd /tmp/
[root@master-1 tmp]# tar xf etcd-v3.3.12-linux-amd64.tar.gz
[root@master-1 tmp]# cd etcd-v3.3.12-linux-amd64
[root@master-1 tmp]# cp etcd* /opt/kubernetes/bin/
[root@master-1 tmp]# scp etcd* 192.168.20.45:/opt/kubernetes/bin/
[root@master-1 tmp]# scp etcd* 192.168.20.46:/opt/kubernetes/bin/
1. Create the etcd certificate signing request:
[root@master-1 ~]# vim etcd-csr.json
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.20.44",
"192.168.20.45",
"192.168.20.46"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
2. Generate the etcd certificates:
[root@master-1 ~]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
The following files are generated:
[root@master-1 ~]# ll
total 16
-rw-r--r-- 1 root root 1062 Jul 26 01:18 etcd.csr
-rw-r--r-- 1 root root 287 Jul 26 00:50 etcd-csr.json
-rw------- 1 root root 1679 Jul 26 01:18 etcd-key.pem
-rw-r--r-- 1 root root 1436 Jul 26 01:18 etcd.pem
[root@master-1 ~]# cp etcd*.pem /opt/kubernetes/ssl
[root@master-1 ~]# scp etcd*.pem 192.168.20.45:/opt/kubernetes/ssl
[root@master-1 ~]# scp etcd*.pem 192.168.20.46:/opt/kubernetes/ssl
The configuration on master-1:
[root@master-1 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.20.44:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.44:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.44:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node-1=https://192.168.20.44:2380,etcd-node-2=https://192.168.20.45:2380,etcd-node-3=https://192.168.20.46:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.44:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
The configuration on master-2:
[root@master-2 tmp]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.20.45:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.45:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.45:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node-1=https://192.168.20.44:2380,etcd-node-2=https://192.168.20.45:2380,etcd-node-3=https://192.168.20.46:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.45:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
The configuration on master-3:
[root@master-3 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="https://192.168.20.46:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.46:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.46:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node-1=https://192.168.20.44:2380,etcd-node-2=https://192.168.20.45:2380,etcd-node-3=https://192.168.20.46:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.46:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
Create the etcd systemd unit on all three nodes:
[root@master-1 ~]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos/etcd
Conflicts=etcd2.service
[Service]
Type=notify
Restart=always
RestartSec=5s
LimitNOFILE=40000
TimeoutStartSec=0
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
[Install]
WantedBy=multi-user.target
On each of the three nodes, create the data directory and start etcd:
mkdir /var/lib/etcd
systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
Verify that the etcd service has started on all nodes:
[root@master-1 ~]# etcdctl --endpoints=https://192.168.20.44:2379 \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/etcd.pem \
--key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member 32922a109cfe00b2 is healthy: got healthy result from https://192.168.20.46:2379
member 4fa519fdd3e64a84 is healthy: got healthy result from https://192.168.20.45:2379
member cab6e832332e8b2a is healthy: got healthy result from https://192.168.20.44:2379
cluster is healthy
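etcdctl can also list the members and show which one is currently the leader (same TLS flags as above):
etcdctl --endpoints=https://192.168.20.44:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem member list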
[root@master-1 ~]# cd /tmp/kubernetes/server/bin/
[root@master-1 bin]# cp kube-apiserver /opt/kubernetes/bin/
[root@master-1 bin]# cp kube-controller-manager /opt/kubernetes/bin/
[root@master-1 bin]# cp kube-scheduler /opt/kubernetes/bin/
1. Create the JSON file used to generate the CSR. The hosts list must include the HA proxy VIP and the cluster's first ClusterIP:
[root@master-1 ~]# cd /opt/kubernetes/ssl
[root@master-1 ssl]# vim kubernetes-csr.json
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.20.50",
"10.1.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
2. Generate the Kubernetes certificate and private key:
[root@master-1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
[root@master-1 ssl]# scp kubernetes*.pem 192.168.20.46:/opt/kubernetes/ssl/
...
3. Generate the kubelet bootstrapping token:
[root@master-1 ~]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
197f33fcbbfab2d15603dcc4408358f5
[root@master-1 ~]# vim /opt/kubernetes/ssl/bootstrap-token.csv
197f33fcbbfab2d15603dcc4408358f5,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
4. Create the basic-auth file:
[root@k8s-node-1 ~]# vim /opt/kubernetes/ssl/basic-auth.csv
admin,admin,1
readonly,readonly,2
5. Distribute everything under /opt/kubernetes/ssl to the node machines:
scp -r -p /opt/kubernetes/ssl/* k8s-node-1:/opt/kubernetes/ssl/
scp -r -p /opt/kubernetes/ssl/* k8s-node-2:/opt/kubernetes/ssl/
scp -r -p /opt/kubernetes/ssl/* k8s-node-3:/opt/kubernetes/ssl/
Create the kube-apiserver systemd unit (on each master, change --bind-address to that host's own IP):
[root@master-1 ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
--enable-admission-plugins=MutatingAdmissionWebhook,ValidatingAdmissionWebhook,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--bind-address=192.168.20.44 \
--insecure-bind-address=127.0.0.1 \
--authorization-mode=Node,RBAC \
--runtime-config=rbac.authorization.k8s.io/v1 \
--kubelet-https=true \
--anonymous-auth=false \
--basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
--service-cluster-ip-range=10.1.0.0/16 \
--service-node-port-range=20000-40000 \
--tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/kubernetes.pem \
--etcd-keyfile=/opt/kubernetes/ssl/kubernetes-key.pem \
--etcd-servers=https://192.168.20.44:2379,https://192.168.20.45:2379,https://192.168.20.46:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/log/api-audit.log \
--event-ttl=1h \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
[root@master-1 ~]# systemctl daemon-reload
[root@master-1 ~]# systemctl start kube-apiserver
[root@master-1 ~]# systemctl enable kube-apiserver
[root@master-1 ~]# systemctl status kube-apiserver
[root@master-1 ~]# netstat -lntp|grep kube-apiserver
tcp 0 0 192.168.20.44:6443 0.0.0.0:* LISTEN 4289/kube-apiserver
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 4289/kube-apiserver
[root@master-1 ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
--bind-address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--allocate-node-cidrs=true \
--service-cluster-ip-range=10.1.0.0/16 \
--cluster-cidr=10.2.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--leader-elect=true \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
[root@master-1 ~]# systemctl daemon-reload
[root@master-1 ~]# systemctl start kube-controller-manager
[root@master-1 ~]# systemctl enable kube-controller-manager
[root@master-1 ~]# systemctl status kube-controller-manager
[root@master-1 ~]# netstat -lntp|grep kube-con
tcp 0 0 127.0.0.1:10252 0.0.0.0:* LISTEN 4390/kube-controlle
[root@master-1 ~]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
ExecStart=/opt/kubernetes/bin/kube-scheduler \
--address=127.0.0.1 \
--master=http://127.0.0.1:8080 \
--leader-elect=true \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
[root@master-1 ~]# systemctl daemon-reload
[root@master-1 ~]# systemctl start kube-scheduler
[root@master-1 ~]# systemctl enable kube-scheduler
[root@master-1 ~]# systemctl status kube-scheduler
[root@master-1 ~]# netstat -lntp|grep kube-scheduler
tcp 0 0 127.0.0.1:10251 0.0.0.0:* LISTEN 4445/kube-scheduler
(See the node deployment section below; the corresponding kube-proxy working directory also has to be created.)
1. Copy the kubectl binary:
[root@master-1 ~]# cd /tmp/kubernetes/node/bin/
[root@master-1 bin]# cp kubectl /opt/kubernetes/bin/
2. Create the admin certificate signing request:
[root@master-1 ~]# vim /opt/kubernetes/ssl/admin-csr.json
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "system:masters",
"OU": "System"
}
]
}
3. Generate the admin certificate and private key:
[root@master-1 ~]# cd /opt/kubernetes/ssl/
[root@master-1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes admin-csr.json | cfssljson -bare admin
4. Set the cluster parameters:
[root@master-1 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.20.50:6443
Cluster "kubernetes" set.
5. Set the client authentication parameters:
[root@master-1 ~]# kubectl config set-credentials admin \
--client-certificate=/opt/kubernetes/ssl/admin.pem \
--embed-certs=true \
--client-key=/opt/kubernetes/ssl/admin-key.pem
User "admin" set.
6. Set the context parameters:
[root@master-1 ~]# kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=admin
Context "kubernetes" created.
7. Set the default context:
[root@master-1 ~]# kubectl config use-context kubernetes
Switched to context "kubernetes".
8. Check the current component status with kubectl:
[root@master-1 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-1 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-0 Healthy {"health":"true"}
Extract the kubernetes-node-linux-amd64.tar.gz package on the node machines and perform the following:
[root@k8s-node-1 ~]# cd /tmp/kubernetes/node/bin
[root@k8s-node-1 bin]# cp kubelet kube-proxy /opt/kubernetes/bin/
[root@k8s-node-1 bin]# scp kubelet kube-proxy 192.168.20.48:/opt/kubernetes/bin/
[root@k8s-node-1 bin]# scp kubelet kube-proxy 192.168.20.49:/opt/kubernetes/bin/
1. Create the role binding that grants kubelet bootstrapping permissions:
[root@master-1 ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io "kubelet-bootstrap" created
2. Set the cluster parameters:
[root@master-1 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.20.50:6443 \
--kubeconfig=bootstrap.kubeconfig
Cluster "kubernetes" set.
3. Set the client authentication parameters:
[root@master-1 ~]# kubectl config set-credentials kubelet-bootstrap \
--token=197f33fcbbfab2d15603dcc4408358f5 \
--kubeconfig=bootstrap.kubeconfig
User "kubelet-bootstrap" set.
4. Set the context parameters:
[root@master-1 ~]# kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
Context "default" created.
5. Select the default context:
[root@master-1 ~]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
Switched to context "default"
6. The steps above produce a bootstrap.kubeconfig file in the current directory; distribute it to every node:
[root@k8s-node-1 ~]# cp bootstrap.kubeconfig /opt/kubernetes/cfg/
[root@k8s-node-1 ~]# scp bootstrap.kubeconfig 192.168.20.47:/opt/kubernetes/cfg/
[root@k8s-node-1 ~]# scp bootstrap.kubeconfig 192.168.20.48:/opt/kubernetes/cfg/
[root@k8s-node-1 ~]# scp bootstrap.kubeconfig 192.168.20.49:/opt/kubernetes/cfg/
The following must be done on all node machines:
[root@k8s-node-2 ~]# mkdir -p /etc/cni/net.d
[root@k8s-node-2 ~]# vim /etc/cni/net.d/10-default.conf
{
"name": "flannel",
"type": "flannel",
"delegate": {
"bridge": "docker0",
"isDefaultGateway": true,
"mtu": 1400
}
}
The following must be done on all node machines (change the IP addresses in the unit file to each node's own):
[root@k8s-node-2 ~]# mkdir /var/lib/kubelet
[root@k8s-node-2 ~]# vim /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
--address=192.168.20.48 \
--hostname-override=192.168.20.48 \
--pod-infra-container-image=mirrorgooglecontainers/pause-amd64:3.1 \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--cert-dir=/opt/kubernetes/ssl \
--network-plugin=cni \
--cni-conf-dir=/etc/cni/net.d \
--cni-bin-dir=/opt/kubernetes/bin/cni \
--cluster-dns=10.1.0.2 \
--cluster-domain=cluster.local. \
--hairpin-mode hairpin-veth \
--allow-privileged=true \
--fail-swap-on=false \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
[root@k8s-node-2 ~]# systemctl daemon-reload
[root@k8s-node-2 ~]# systemctl start kubelet
[root@k8s-node-2 ~]# systemctl enable kubelet
[root@k8s-node-2 ~]# systemctl status kubelet
[root@master-1 ~]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-FDH7Y3rghf1WPsEJH2EYnofvOSeyHn2f-l_-4rH-LEk 2m kubelet-bootstrap Pending
[root@master-1 ~]# kubectl get csr|grep 'Pending' | awk 'NR>0{print $1}'| xargs kubectl certificate approve
certificatesigningrequest.certificates.k8s.io "node-csr-FDH7Y3rghf1WPsEJH2EYnofvOSeyHn2f-l_-4rH-LEk" approved
[root@master-1 ~]# kubectl get csr
NAME AGE REQUESTOR CONDITION
node-csr-FDH7Y3rghf1WPsEJH2EYnofvOSeyHn2f-l_-4rH-LEk 11m kubelet-bootstrap Approved,Issued
[root@master-1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.20.48 Ready <none> 35s v1.14.1
Check the kubelet service on the node machine:
[root@k8s-node-2 ~]# netstat -lntp|grep kubelet
tcp 0 0 127.0.0.1:10248 0.0.0.0:* LISTEN 7917/kubelet
tcp 0 0 192.168.20.48:10250 0.0.0.0:* LISTEN 7917/kubelet
tcp 0 0 192.168.20.48:10255 0.0.0.0:* LISTEN 7917/kubelet
tcp 0 0 192.168.20.48:4194 0.0.0.0:* LISTEN 7917/kubelet
1. Configure kube-proxy to use LVS; run this on all nodes:
yum install -y ipvsadm ipset conntrack
2. Create the certificate request:
[root@master-1 ~]# vim kube-proxy-csr.json
{
"CN": "system:kube-proxy",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
3. Generate the certificates:
[root@master-1 ~]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
4. Distribute the certificates to all node machines:
[root@master-1 ~]# cp kube-proxy*.pem /opt/kubernetes/ssl/
[root@master-1 ~]# scp kube-proxy*.pem 192.168.20.47:/opt/kubernetes/ssl/
[root@master-1 ~]# scp kube-proxy*.pem 192.168.20.48:/opt/kubernetes/ssl/
[root@master-1 ~]# scp kube-proxy*.pem 192.168.20.49:/opt/kubernetes/ssl/
5. Create the kube-proxy kubeconfig:
[root@k8s-node-2 ~]# kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.20.50:6443 \
--kubeconfig=kube-proxy.kubeconfig
Cluster "kubernetes" set.
6. Set the kube-proxy user credentials:
[root@k8s-node-2 ~]# kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
User "kube-proxy" set.
7. Set the context parameters:
[root@k8s-node-2 ~]# kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
Context "default" created.
8. Switch to the default context:
[root@k8s-node-2 ~]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Switched to context "default".
9. Distribute the kube-proxy.kubeconfig file to all nodes:
[root@k8s-node-2 ~]# scp kube-proxy.kubeconfig 192.168.20.44:/opt/kubernetes/cfg/
[root@k8s-node-2 ~]# scp kube-proxy.kubeconfig 192.168.20.45:/opt/kubernetes/cfg/
[root@k8s-node-2 ~]# scp kube-proxy.kubeconfig 192.168.20.46:/opt/kubernetes/cfg/
[root@k8s-node-2 ~]# scp kube-proxy.kubeconfig 192.168.20.47:/opt/kubernetes/cfg/
[root@k8s-node-2 ~]# scp kube-proxy.kubeconfig 192.168.20.48:/opt/kubernetes/cfg/
[root@k8s-node-2 ~]# scp kube-proxy.kubeconfig 192.168.20.49:/opt/kubernetes/cfg/
10. Create the kube-proxy systemd service file.
Run this on all nodes; the IPs in the file must be changed to each host's own:
[root@k8s-node-1 ~]# mkdir /var/lib/kube-proxy
[root@k8s-node-1 ~]# vim /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
--bind-address=192.168.20.47 \
--hostname-override=192.168.20.47 \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig \
--masquerade-all \
--feature-gates=SupportIPVSProxyMode=true \
--proxy-mode=ipvs \
--ipvs-min-sync-period=5s \
--ipvs-sync-period=5s \
--ipvs-scheduler=rr \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
11. Start the service:
systemctl start kube-proxy
systemctl enable kube-proxy
systemctl status kube-proxy
12. Check the service status and the LVS state:
[root@k8s-node-1 ~]# ipvsadm -L -n
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 10.1.0.1:443 rr
-> 192.168.20.44:6443 Masq 1 0 0
-> 192.168.20.45:6443 Masq 1 0 0
-> 192.168.20.46:6443 Masq 1 1 0
After all node machines are configured successfully, you should see the following:
[root@master-1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.20.47 Ready <none> 6d21h v1.14.1
192.168.20.48 Ready <none> 4d1h v1.14.1
192.168.20.49 Ready <none> 4d1h v1.14.1
flannel must be deployed on all nodes.
1. Create the certificate request file:
[root@master-1 ~]# vim flanneld-csr.json
{
"CN": "flanneld",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "BeiJing",
"L": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
2. Generate the certificates:
[root@master-1 ~]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
3. Distribute the certificates:
[root@master-1 ~]# cp flanneld*.pem /opt/kubernetes/ssl/
[root@master-1 ~]# scp flanneld*.pem {all-k8s-node}:/opt/kubernetes/ssl/
1. Extract the flannel tarball downloaded earlier and distribute the binaries to the other nodes:
cp mk-docker-opts.sh flanneld /opt/kubernetes/bin/
scp mk-docker-opts.sh flanneld {all-k8s-node}:/opt/kubernetes/bin/
2. Create the following script and distribute it to each node machine:
[root@k8s-node-1 tmp]# vim remove-docker0.sh
#!/bin/bash
# Delete default docker bridge, so that docker can start with flannel network.
# exit on any error
set -e
rc=0
ip link show docker0 >/dev/null 2>&1 || rc="$?"
if [[ "$rc" -eq "0" ]]; then
ip link set dev docker0 down
ip link delete docker0
fi
[root@k8s-node-1 tmp]# cp remove-docker0.sh /opt/kubernetes/bin/
[root@k8s-node-1 tmp]# scp remove-docker0.sh 192.168.20.48:/opt/kubernetes/bin/
[root@k8s-node-1 tmp]# scp remove-docker0.sh 192.168.20.49:/opt/kubernetes/bin/
3. Configure flannel:
[root@k8s-node-1 ~]# vim /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=https://192.168.20.44:2379,https://192.168.20.45:2379,https://192.168.20.46:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
4. Create the flannel systemd unit:
[root@k8s-node-1 ~]# vim /usr/lib/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
Before=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/flannel
ExecStartPre=/opt/kubernetes/bin/remove-docker0.sh
ExecStart=/opt/kubernetes/bin/flanneld ${FLANNEL_ETCD} ${FLANNEL_ETCD_KEY} ${FLANNEL_ETCD_CAFILE} ${FLANNEL_ETCD_CERTFILE} ${FLANNEL_ETCD_KEYFILE}
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -d /run/flannel/docker
Type=notify
[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
5. Distribute the configuration files to each node:
scp /opt/kubernetes/cfg/flannel {all-k8s-node}:/opt/kubernetes/cfg/
scp /usr/lib/systemd/system/flannel.service {all-k8s-node}:/usr/lib/systemd/system/
1. Download the CNI plugins:
wget https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz
[root@k8s-node-1 tmp]# mkdir /opt/kubernetes/bin/cni
[root@k8s-node-1 tmp]# tar xf cni-plugins-amd64-v0.7.5.tgz -C /opt/kubernetes/bin/cni
2. Distribute them to each node:
[root@k8s-node-1 ~]# scp -r /opt/kubernetes/bin/cni/* {all-k8s-node}:/opt/kubernetes/bin/cni/
3. Create the network key in etcd:
[root@master-1 ~]# /opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
--no-sync -C https://192.168.20.44:2379,https://192.168.20.45:2379,https://192.168.20.46:2379 \
mk /kubernetes/network/config '{ "Network": "10.2.0.0/16", "Backend": { "Type": "vxlan", "VNI": 1 }}' >/dev/null 2>&1
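To confirm the key was written, read it back with the same flags; the output should echo the JSON document above:
/opt/kubernetes/bin/etcdctl --ca-file /opt/kubernetes/ssl/ca.pem --cert-file /opt/kubernetes/ssl/flanneld.pem --key-file /opt/kubernetes/ssl/flanneld-key.pem \
      --no-sync -C https://192.168.20.44:2379 get /kubernetes/network/config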
4. Start flannel on each node:
[root@k8s-node-1 ~]# chmod +x /opt/kubernetes/bin/*
[root@k8s-node-1 ~]# systemctl daemon-reload
[root@k8s-node-1 ~]# systemctl start flannel
[root@k8s-node-1 ~]# systemctl enable flannel
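If flannel came up correctly, each node has a flannel.1 vxlan interface holding a /24 out of 10.2.0.0/16, and mk-docker-opts.sh has written the environment file that docker consumes in the next section:
ip -4 addr show flannel.1
cat /run/flannel/docker
The file should contain a DOCKER_OPTS line with a --bip inside this node's subnet.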
1. Modify docker's systemd unit file:
[Unit] # under [Unit], adjust After and add Requires
After=network-online.target firewalld.service flannel.service
Wants=network-online.target
Requires=flannel.service
[Service] # add EnvironmentFile=-/run/flannel/docker
Type=notify
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd $DOCKER_OPTS
2. Make the same modification on the other node machines:
[root@k8s-node-2 ~]# scp /usr/lib/systemd/system/docker.service {k8s-node}:/usr/lib/systemd/system/
3. Restart docker. If the docker0 interface comes up with an address in the 10.2.0.0/16 range, the configuration works:
[root@k8s-node-3 ~]# systemctl daemon-reload
[root@k8s-node-3 ~]# systemctl restart docker
[root@k8s-node-3 ~]# ip a| grep -A 3 'docker0'
7: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:e9:2b:36:86 brd ff:ff:ff:ff:ff:ff
inet 10.2.79.1/24 scope global docker0
valid_lft forever preferred_lft forever
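With kubelet running on at least two nodes, cross-node pod traffic can be spot-checked. kubectl run with --replicas still works in 1.14, though it prints a deprecation warning; the pod names below are illustrative, so substitute the real ones from kubectl get pod -o wide:
kubectl run net-test --image=busybox:1.28 --replicas=2 --command -- sleep 3600
kubectl get pod -o wide
kubectl exec <net-test-pod-on-node-A> -- ping -c 3 <IP-of-net-test-pod-on-node-B>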
Create a coredns.yaml file with the following content:
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
data:
Corefile: |
.:53 {
errors
health
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
upstream
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
proxy . /etc/resolv.conf
cache 30
}
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
serviceAccountName: coredns
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: coredns
image: coredns/coredns:1.4.0
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 10.1.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
[root@master-1 tmp]# kubectl create -f coredns.yaml
[root@master-1 ~]# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-76fcfc9f65-9fkfh 1/1 Running 2 3d7h 10.2.45.3 192.168.20.49 <none> <none>
coredns-76fcfc9f65-zfplt 1/1 Running 1 3d6h 10.2.24.2 192.168.20.48 <none> <none>
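A quick way to confirm that CoreDNS resolves cluster services (busybox:1.28 is used deliberately, as nslookup is broken in several later busybox images):
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
The reported DNS server should be 10.1.0.2, and kubernetes.default should resolve to 10.1.0.1.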
1. Deploy the Dashboard by applying the yaml files in this directory:
[root@master-1 ~]# ll /tmp/dashboard/
total 20
-rw-r--r-- 1 root root 356 Jul 27 03:43 admin-user-sa-rbac.yaml
-rw-r--r-- 1 root root 4253 Jul 27 03:47 kubernetes-dashboard.yaml
-rw-r--r-- 1 root root 458 Jul 27 03:49 ui-admin-rbac.yaml
-rw-r--r-- 1 root root 477 Jul 27 03:50 ui-read-rbac.yaml
[root@master-1 ~]# kubectl create -f /tmp/dashboard/
2. Confirm that the service is running normally:
[root@master-1 ~]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-76fcfc9f65-9fkfh 1/1 Running 2 3d7h
coredns-76fcfc9f65-zfplt 1/1 Running 1 3d6h
kubernetes-dashboard-68ddcc97fc-w4bxf 1/1 Running 1 3d2h
[root@master-1 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.20.50:6443
CoreDNS is running at https://192.168.20.50:6443/api/v1/namespaces/kube-system/services/coredns:dns/proxy
kubernetes-dashboard is running at https://192.168.20.50:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
3. Open the dashboard URL shown above and log in with the admin/admin account; generate a token with the following command:
[root@master-1 ~]# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
4. Copy the token and choose token-based login.
1. Deploy Heapster with the following files:
[root@master-1 ~]# ll heastper/
total 12
-rw-r--r-- 1 root root 2306 Jul 26 20:28 grafana.yaml
-rw-r--r-- 1 root root 1562 Jul 26 20:29 heapster.yaml
-rw-r--r-- 1 root root 1161 Jul 26 20:29 influxdb.yaml
[root@k8s-node-1 ~]# kubectl create -f heastper/
Run the kubectl cluster-info command to view the URLs of the current services.
In a real production environment that runs entirely on an internal network, the etcd cluster can be configured without certificates, which makes both the setup and later failure recovery simpler.
Certificate-free etcd is accessed over plain http; relative to the document above, the following configuration changes are needed:
1. The etcd configuration (shown for etcd-node-1):
# cat /opt/kubernetes/cfg/etcd.conf
#[member]
ETCD_NAME="etcd-node-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
ETCD_LISTEN_PEER_URLS="http://192.168.20.31:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.20.31:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.20.31:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
ETCD_INITIAL_CLUSTER="etcd-node-1=http://192.168.20.31:2380,etcd-node-2=http://192.168.20.32:2380,etcd-node-3=http://192.168.20.33:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.20.31:2379"
#[security]
#CLIENT_CERT_AUTH="true"
#ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
#ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
#ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
#PEER_CLIENT_CERT_AUTH="true"
#ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
#ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
#ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
2. The flannel configuration:
# cat /opt/kubernetes/cfg/flannel
FLANNEL_ETCD="-etcd-endpoints=http://192.168.20.44:2379,http://192.168.20.45:2379,http://192.168.20.46:2379"
FLANNEL_ETCD_KEY="-etcd-prefix=/kubernetes/network"
#FLANNEL_ETCD_CAFILE="--etcd-cafile=/opt/kubernetes/ssl/ca.pem"
#FLANNEL_ETCD_CERTFILE="--etcd-certfile=/opt/kubernetes/ssl/flanneld.pem"
#FLANNEL_ETCD_KEYFILE="--etcd-keyfile=/opt/kubernetes/ssl/flanneld-key.pem"
3. Remove the etcd certificate settings from kube-apiserver: delete the --etcd-cafile, --etcd-certfile and --etcd-keyfile parameters outright, and change the etcd URLs to http:
# cat /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
--bind-address=192.168.20.44 \
--insecure-bind-address=127.0.0.1 \
--authorization-mode=Node,RBAC \
--runtime-config=rbac.authorization.k8s.io/v1 \
--kubelet-https=true \
--anonymous-auth=false \
--basic-auth-file=/opt/kubernetes/ssl/basic-auth.csv \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/ssl/bootstrap-token.csv \
--service-cluster-ip-range=10.1.0.0/16 \
--service-node-port-range=20000-40000 \
--tls-cert-file=/opt/kubernetes/ssl/kubernetes.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kubernetes-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-servers=http://192.168.20.44:2379,http://192.168.20.45:2379,http://192.168.20.46:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/log/api-audit.log \
--event-ttl=1h \
--v=2 \
--logtostderr=false \
--log-dir=/opt/kubernetes/log
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
4. Restart the flannel, kubelet, kube-apiserver and related services on each node.