91超碰碰碰碰久久久久久综合_超碰av人澡人澡人澡人澡人掠_国产黄大片在线观看画质优化_txt小说免费全本

溫馨提示×

溫馨提示×

您好,登錄后才能下訂單哦!

密碼登錄×
登錄注冊×
其他方式登錄
點擊 登錄注冊 即表示同意《億速云用戶服務條款》

centos7 k8s集群部署

發布時間:2020-05-10 18:59:17 來源:網絡 閱讀:49294 作者:huangzai2014 欄目:建站服務器

安裝k8s集群前期準備:
網絡環境:

節點 主機名 ip
Master k8s_master 192.168.3.216
Node1 k8s_client1 192.168.3.217
Node2 k8s_client2 192.168.3.219

centos7版本:
  [root@k8s_master ~]# cat /etc/redhat-release
  CentOS Linux release 7.4.1708 (Core)

關閉firewalld:
  systemctl stop firewalld
  systemctl disable firewalld

三臺主機基礎服務安裝:
  [root@k8s_master ~]#yum -y update
  [root@k8s_master ~]#yum -y install net-tools wget vim ntp
  [root@k8s_master ~]#systemctl enable ntpd
  [root@k8s_master ~]#systemctl start ntpd

分別在三臺主機,設置主機名:
  Master
  hostnamectl --static set-hostname k8s_master
  Node1
  hostnamectl --static set-hostname k8s_client1
  Node2
  hostnamectl --static set-hostname k8s_client2

設置hosts,分別在三臺主機執行:

cat <<EOF > /etc/hosts
192.168.3.217 k8s_client1
192.168.3.219 k8s_client2
192.168.3.216 k8s_master
EOF

部署Master操作:
  安裝etcd服務:
  [root@k8s_master ~]# yum -y install etcd

編輯配置文件 /etc/etcd/etcd.conf
  [root@k8s_master ~]# cat /etc/etcd/etcd.conf | grep -v "^#"

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379,http://0.0.0.0:4001"
ETCD_NAME="master"
ETCD_ADVERTISE_CLIENT_URLS="http://k8s_master:2379,http://k8s_master:4001"

  設置開機啟動并驗證狀態
  [root@k8s_master ~]#systemctl enable etcd
  [root@k8s_master ~]#systemctl start etcd

  etcd檢查
  [root@k8s_master ~]# etcdctl -C http://k8s_master:4001 cluster-health
  member 8e9e05c52164694d is healthy: got healthy result from http://k8s_master:2379
  cluster is healthy
  [root@k8s_master ~]# etcdctl -C http://k8s_master:2379 cluster-health
  member 8e9e05c52164694d is healthy: got healthy result from http://k8s_master:2379
  cluster is healthy

安裝docker服務
  [root@k8s_master ~]# yum -y install docker
  設置開機啟動,開啟服務:
  [root@k8s_master ~]#systemctl enable docker
  [root@k8s_master ~]#systemctl start docker
查看docker版本:
[root@k8s_master ~]# docker version
Client:
Version: 1.12.6
API version: 1.24
Package version: docker-1.12.6-71.git3e8e77d.el7.centos.1.x86_64
Go version: go1.8.3
Git commit: 3e8e77d/1.12.6
Built: Tue Jan 30 09:17:00 2018
OS/Arch: linux/amd64

Server:
Version: 1.12.6
API version: 1.24
Package version: docker-1.12.6-71.git3e8e77d.el7.centos.1.x86_64
Go version: go1.8.3
Git commit: 3e8e77d/1.12.6
Built: Tue Jan 30 09:17:00 2018
OS/Arch: linux/amd64

安裝kubernetes
  [root@k8s_master ~]# yum install kubernetes

在kubernetes master上需要運行以下組件:
    Kubernetes API Server
    Kubernetes Controller Manager
    Kubernetes Scheduler

修改apiserver服務配置文件:
[root@k8s_master ~]# cat /etc/kubernetes/apiserver | grep -v "^#"

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.3.216:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_API_ARGS=""

修改config配置文件:
[root@k8s_master ~]# cat /etc/kubernetes/config | grep -v "^#"

KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.3.216:8080"

設置開機啟動,開啟服務
  [root@k8s_master ~]#systemctl enable kube-apiserver kube-controller-manager kube-scheduler
  [root@k8s_master ~]#systemctl start kube-apiserver kube-controller-manager kube-scheduler

查看服務端口:
[root@k8s_master ~]# netstat -tnlp

Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 127.0.0.1:2380          0.0.0.0:*               LISTEN      973/etcd            
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      970/sshd            
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      1184/master         
tcp6       0      0 :::6443                 :::*                    LISTEN      1253/kube-apiserver 
tcp6       0      0 :::2379                 :::*                    LISTEN      973/etcd            
tcp6       0      0 :::10251                :::*                    LISTEN      675/kube-scheduler  
tcp6       0      0 :::10252                :::*                    LISTEN      674/kube-controller 
tcp6       0      0 :::8080                 :::*                    LISTEN      1253/kube-apiserver 
tcp6       0      0 :::22                   :::*                    LISTEN      970/sshd            
tcp6       0      0 ::1:25                  :::*                    LISTEN      1184/master         
tcp6       0      0 :::4001                 :::*                    LISTEN      973/etcd 

部署Node:
安裝docker
   參考Master安裝方法
安裝kubernetes
  參考Master安裝方法
配置、啟動kubernetes
  node節點上需要運行以下組件
   kubelet kube-proxy

Node節點主機做以下配置:
config:
[root@k8s_client1 ~]# cat /etc/kubernetes/config | grep -v "^#"

KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.3.216:8080"
kubelet:
[root@k8s_client1 ~]# cat /etc/kubernetes/kubelet | grep -v "^#"
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.3.217"
KUBELET_API_SERVER="--api-servers=http://192.168.3.216:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""

設置開機啟動、開啟服務
  [root@k8s_client1 ~]# systemctl enable kubelet kube-proxy
  [root@k8s_client1 ~]# systemctl start kubelet kube-proxy

查看端口:
[root@k8s_client1 ~]# netstat -ntlp

Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address           Foreign Address         State       PID/Program name    
tcp        0      0 0.0.0.0:22              0.0.0.0:*               LISTEN      942/sshd            
tcp        0      0 127.0.0.1:25            0.0.0.0:*               LISTEN      2258/master         
tcp        0      0 127.0.0.1:10248         0.0.0.0:*               LISTEN      17932/kubelet       
tcp        0      0 127.0.0.1:10249         0.0.0.0:*               LISTEN      17728/kube-proxy    
tcp6       0      0 :::10250                :::*                    LISTEN      17932/kubelet       
tcp6       0      0 :::10255                :::*                    LISTEN      17932/kubelet       
tcp6       0      0 :::22                   :::*                    LISTEN      942/sshd            
tcp6       0      0 ::1:25                  :::*                    LISTEN      2258/master         
tcp6       0      0 :::4194                 :::*                    LISTEN      17932/kubelet

Master上查看集群中的節點及節點狀態
  [root@k8s_master ~]# kubectl get node

NAME            STATUS     AGE
127.0.0.1       NotReady   1d
192.168.3.217   Ready      1d
192.168.3.219   Ready      1d

  [root@k8s_master ~]# kubectl -s http://k8s_master:8080 get node

NAME            STATUS     AGE
127.0.0.1       NotReady   1d
192.168.3.217   Ready      1d
192.168.3.219   Ready      1d

kubernetes集群搭建完成,還需flannel安裝
flannel是CoreOS提供用于解決Docker集群跨主機通訊的覆蓋網絡工具。它的主要思路是:預先留出一個網段,每個主機使用其中一部分,然后每個容器被分配不同的ip;讓所有的容器認為大家在同一個直連的網絡,底層通過UDP/VxLAN等進行報文的封裝和轉發。

Master/Node上flannel安裝:
  [root@k8s_master ~]#yum install flannel

flannel配置:
  Master/Node上修改/etc/sysconfig/flanneld

Master:
  [root@k8s_master ~]# cat /etc/sysconfig/flanneld | grep -v "^#"

FLANNEL_ETCD_ENDPOINTS="http://192.168.3.216:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

Node:
  [root@k8s_client1 ~]# cat /etc/sysconfig/flanneld | grep -v "^#"

FLANNEL_ETCD_ENDPOINTS="http://192.168.3.216:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

添加網絡:
[root@k8s_master ~]#etcdctl mk /atomic.io/network/config '{"Network":"10.8.0.0/16"}'

Master/Node設置服務開機啟動
[root@k8s_master ~]# systemctl enable flanneld
[root@k8s_master ~]# systemctl start flanneld

Master/Node節點重啟服務:
Master:

for SERVICES in docker kube-apiserver kube-controller-manager kube-scheduler; do systemctl restart $SERVICES ; done

Node:
  [root@k8s_client1 ~]#systemctl restart kube-proxy kubelet docker

查看flannel網絡:
  Master節點:
[root@k8s_master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether 00:50:56:98:3b:d4 brd ff:ff:ff:ff:ff:ff
inet 192.168.3.216/24 brd 192.168.3.255 scope global ens160
valid_lft forever preferred_lft forever
inet6 fe80::250:56ff:fe98:3bd4/64 scope link
valid_lft forever preferred_lft forever
3: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN qlen 500
link/none
inet 10.8.57.0/16 scope global flannel0
valid_lft forever preferred_lft forever
inet6 fe80::3578:6e81:8dc9:ed82/64 scope link flags 800
valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:8b:7c:fd:8d brd ff:ff:ff:ff:ff:ff
inet 10.8.57.1/24 scope global docker0
valid_lft forever preferred_lft forever

  Node節點:
[root@k8s_client1 ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether 00:50:56:98:65:e0 brd ff:ff:ff:ff:ff:ff
inet 192.168.3.217/24 brd 192.168.3.255 scope global ens160
valid_lft forever preferred_lft forever
inet6 fe80::250:56ff:fe98:65e0/64 scope link
valid_lft forever preferred_lft forever
4: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN
link/ether 02:42:23:4b:85:6f brd ff:ff:ff:ff:ff:ff
inet 10.8.6.1/24 scope global docker0
valid_lft forever preferred_lft forever
9: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN qlen 500
link/none
inet 10.8.6.0/16 scope global flannel0
valid_lft forever preferred_lft forever
inet6 fe80::827:f63e:34ee:1f8e/64 scope link flags 800
valid_lft forever preferred_lft forever

向AI問一下細節

免責聲明:本站發布的內容(圖片、視頻和文字)以原創、轉載和分享為主,文章觀點不代表本網站立場,如果涉及侵權請聯系站長郵箱:is@yisu.com進行舉報,并提供相關證據,一經查實,將立刻刪除涉嫌侵權內容。

AI

昭觉县| 旬阳县| 磴口县| 安远县| 神木县| 阿勒泰市| 玉门市| 丹寨县| 泰安市| 运城市| 宜宾县| 长春市| 昌江| 万盛区| 盱眙县| 安宁市| 安福县| 雷波县| 西藏| 鄂托克前旗| 灵寿县| 洪泽县| 凭祥市| 彰化县| 大厂| 定州市| 吉木乃县| 灵丘县| 长岭县| 长武县| 泸西县| 贵阳市| 巴南区| 曲周县| 新营市| 武汉市| 新沂市| 礼泉县| 吉安县| 类乌齐县| 平江县|