| ip | 组件 | 备注 |
|---|---|---|
| 192.168.151.41 | kube-apiserver kube-controller-manager kube-scheduler etcd | |
| 192.168.151.40 | etcd flannel docker kubelet kube-proxy | |
| 192.168.151.38 | etcd | |
参考文档: kubernetes-handbook
软件依赖
- golang,略过
- cfssl,证书相关
1
2
|
# Install the cfssl PKI toolchain (cfssl generates certs, cfssljson writes
# the .pem/.csr files from cfssl's JSON output — both are used below).
$ go get -u github.com/cloudflare/cfssl/cmd/cfssl
$ go get -u github.com/cloudflare/cfssl/cmd/cfssljson
cfssl的使用请参考: cfssl使用文档
CA
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
|
# Prepare a working directory and create the CA signing policy.
$ mkdir /root/zhaoming9/ssl
$ cd /root/zhaoming9/ssl
$ cfssl print-defaults config > config.json
$ cfssl print-defaults csr > csr.json
# Create ca-config.json following the format of the config.json template.
# The certificate expiry is set to 87600h (10 years). The "kubernetes"
# profile allows both server auth and client auth, so one cert can be
# used for mutual TLS.
$ cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF
# NOTE(review): ca.pem / ca-key.pem are consumed by every gencert step
# below, but the `cfssl gencert -initca` step that produces them does not
# appear in this transcript — presumably it was run here; verify.
kubernetes证书
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
|
# CSR for the shared "kubernetes" server certificate. The hosts list must
# contain every address the apiserver/etcd answer on, plus the cluster-IP
# of the kubernetes service (10.1.0.1, first IP of the service CIDR) and
# the in-cluster DNS names.
$ vi kubernetes-csr.json
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.151.41",
    "192.168.151.40",
    "192.168.151.38",
    "192.168.151.33",
    "10.1.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
    -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
admin证书
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
|
# Client certificate for kubectl admin access. O=system:masters maps the
# holder into the cluster-admin RBAC group.
$ vi admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
    -profile=kubernetes admin-csr.json | cfssljson -bare admin
kube-proxy证书
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
|
# Client certificate for kube-proxy. CN=system:kube-proxy matches the
# RBAC identity kube-proxy authenticates as.
$ vi kube-proxy-csr.json
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
$ cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
    -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
证书分发
1
2
|
# Install all generated certs/keys where the services expect them.
$ mkdir -p /etc/kubernetes/ssl
$ cp *.pem /etc/kubernetes/ssl
每台主机都相同
ETCD集群安装
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
|
# Download etcd v3.2.7 and install the binaries.
$ wget https://github.com/coreos/etcd/releases/download/v3.2.7/etcd-v3.2.7-linux-amd64.tar.gz
$ tar xzvf etcd-v3.2.7-linux-amd64.tar.gz
$ mv etcd-v3.2.7-linux-amd64/etcd* /usr/local/bin

# systemd unit: TLS on both client (2379) and peer (2380) ports, reusing
# the shared kubernetes server cert; per-node settings come from
# /etc/etcd/etcd.conf (the leading "-" makes the file optional).
$ vi /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/local/bin/etcd \
  --name ${ETCD_NAME} \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --peer-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls ${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
  --listen-peer-urls ${ETCD_LISTEN_PEER_URLS} \
  --listen-client-urls ${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
  --advertise-client-urls ${ETCD_ADVERTISE_CLIENT_URLS} \
  --initial-cluster-token ${ETCD_INITIAL_CLUSTER_TOKEN} \
  --initial-cluster ${ETCD_INITIAL_CLUSTER} \
  --initial-cluster-state new \
  --data-dir=${ETCD_DATA_DIR}
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

# Per-node config shown for node .41 (infra1); on .40/.38 change
# ETCD_NAME and the node's own URLs accordingly.
$ vi /etc/etcd/etcd.conf
# [member]
ETCD_NAME=infra1
ETCD_DATA_DIR="/var/lib/etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.151.41:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.151.41:2379"
# [cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.151.41:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER="infra1=https://192.168.151.41:2380,infra2=https://192.168.151.40:2380,infra3=https://192.168.151.38:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.151.41:2379"

$ systemctl daemon-reload
$ systemctl enable etcd
$ systemctl start etcd
$ systemctl status etcd

# Verify the 3-node cluster over TLS.
$ etcdctl \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  cluster-health
kubernetes
1
2
3
|
# Download and unpack the Kubernetes v1.7.5 release tarball.
$ cd /root/zhaoming9
$ wget https://github.com/kubernetes/kubernetes/releases/download/v1.7.5/kubernetes.tar.gz
$ tar xzvf kubernetes.tar.gz
kubectl kubeconfig
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
|
# Build the admin kubeconfig used by kubectl.
# NOTE(review): KUBE_APISERVER must be exported beforehand (e.g.
# KUBE_APISERVER="https://192.168.151.41:6443") — the transcript does not
# show it being set; verify.
# Set cluster parameters
$ kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER}
# Set client credentials (admin cert generated earlier)
$ kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem
# Set the context
$ kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin
# Make it the default context
$ kubectl config use-context kubernetes
kube-apiserver
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
|
$ vi /etc/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
After=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/bin/kube-apiserver \
  $KUBE_LOGTOSTDERR \
  $KUBE_LOG_LEVEL \
  $KUBE_ETCD_SERVERS \
  $KUBE_API_ADDRESS \
  $KUBE_API_PORT \
  $KUBELET_PORT \
  $KUBE_ALLOW_PRIV \
  $KUBE_SERVICE_ADDRESSES \
  $KUBE_ADMISSION_CONTROL \
  $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

# Shared config sourced by all kubernetes services on this host.
$ vi /etc/kubernetes/config
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=true"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.151.41:8080"

$ vi /etc/kubernetes/apiserver
###
## kubernetes system config
##
## The following values are used to configure the kube-apiserver
##
#
## The address on the local server to listen to.
KUBE_API_ADDRESS="--advertise-address=192.168.151.41 --bind-address=192.168.151.41 --insecure-bind-address=192.168.151.41"
#
## The port on the local server to listen on.
KUBE_API_PORT="--insecure-port=8080 --secure-port=6443"
#
## Port minions listen on
#KUBELET_PORT="--kubelet-port=10250"
#
## Comma separated list of nodes in the etcd cluster
KUBE_ETCD_SERVERS="--etcd-servers=https://192.168.151.41:2379,https://192.168.151.40:2379,https://192.168.151.38:2379 --etcd-cafile=/etc/kubernetes/ssl/ca.pem --etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem --etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"
#
## Address range to use for services
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.1.0.0/16"
#
## default admission control policies
KUBE_ADMISSION_CONTROL="--admission-control=ServiceAccount,NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
#
## Add your own!
KUBE_API_ARGS="--authorization-mode=RBAC --runtime-config=rbac.authorization.k8s.io/v1beta1 --service-node-port-range=30000-32767 --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem --client-ca-file=/etc/kubernetes/ssl/ca.pem --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem --enable-swagger-ui=true --apiserver-count=3 --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/var/lib/audit.log --event-ttl=1h --kubelet-https=true --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem --kubelet-client-certificate=/etc/kubernetes/ssl/kubernetes.pem --kubelet-client-key=/etc/kubernetes/ssl/kubernetes-key.pem"

$ systemctl daemon-reload
$ systemctl enable kube-apiserver
$ systemctl start kube-apiserver
$ systemctl status kube-apiserver
kube-controller-manager
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
|
$ vi /etc/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/bin/kube-controller-manager \
  $KUBE_LOGTOSTDERR \
  $KUBE_LOG_LEVEL \
  $KUBE_MASTER \
  $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

$ vi /etc/kubernetes/controller-manager
###
# The following values are used to configure the kubernetes controller-manager

# defaults from config and apiserver should be adequate

# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS="--address=127.0.0.1 --service-cluster-ip-range=10.1.0.0/16 --cluster-name=kubernetes --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem --leader-elect=true"

$ systemctl daemon-reload
$ systemctl enable kube-controller-manager
$ systemctl start kube-controller-manager
kube-scheduler
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
|
$ vi /etc/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/bin/kube-scheduler \
  $KUBE_LOGTOSTDERR \
  $KUBE_LOG_LEVEL \
  $KUBE_MASTER \
  $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

$ vi /etc/kubernetes/scheduler
###
# kubernetes scheduler config

# default config should be adequate

# Add your own!
KUBE_SCHEDULER_ARGS="--leader-elect=true --address=127.0.0.1"

$ systemctl daemon-reload
$ systemctl enable kube-scheduler
$ systemctl start kube-scheduler
Flannel
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
|
$ yum install -y flannel

# flannel must start after etcd (its backing store) and before docker,
# so docker can pick up the flannel-assigned subnet options.
$ vi /etc/systemd/system/flannel.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/etc/flannel/flanneld
ExecStart=/usr/bin/flanneld-start \
  -etcd-endpoints=${ETCD_ENDPOINTS} \
  -etcd-prefix=${ETCD_PREFIX} \
  $FLANNEL_OPTIONS
ExecStartPost=/usr/libexec/flannel/mk-docker-opts.sh -i
Restart=on-failure

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service

$ vi /etc/flannel/flanneld
# Flanneld configuration options

# etcd url location. Point this to the server where etcd runs
ETCD_ENDPOINTS="https://192.168.151.41:2379,https://192.168.151.40:2379,https://192.168.151.38:2379"

# etcd config key. This is the configuration key that flannel queries
# For address range assignment
ETCD_PREFIX="/kube-flannel/network"

# Any additional options that you want to pass
FLANNEL_OPTIONS="-etcd-cafile=/etc/kubernetes/ssl/ca.pem -etcd-certfile=/etc/kubernetes/ssl/kubernetes.pem -etcd-keyfile=/etc/kubernetes/ssl/kubernetes-key.pem"

# Add the following lines to docker.service so docker consumes the
# flannel-generated options:
EnvironmentFile=-/run/flannel/docker
EnvironmentFile=-/run/docker_opts.env
EnvironmentFile=-/run/flannel/subnet.env
# Configure docker's cgroup driver as systemd (must match the kubelet)
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --api-cors-header='*' --exec-opt native.cgroupdriver=systemd

# Seed flannel's network config in etcd.
$ etcdctl \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  mkdir /kube-flannel/network
$ etcdctl \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  mk /kube-flannel/network/config '{"Network":"172.30.0.0/16","SubnetLen":24,"Backend":{"Type":"vxlan"}}'

$ systemctl daemon-reload
$ systemctl start flannel
$ systemctl status flannel
$ systemctl restart docker

# Verify that each node registered a subnet lease.
$ etcdctl \
  --ca-file=/etc/kubernetes/ssl/ca.pem \
  --cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  ls /kube-flannel/network/subnets
kubelet
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
|
$ vi /etc/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/bin/kubelet \
  $KUBE_LOGTOSTDERR \
  $KUBE_LOG_LEVEL \
  $KUBELET_API_SERVER \
  $KUBELET_ADDRESS \
  $KUBELET_PORT \
  $KUBELET_HOSTNAME \
  $KUBE_ALLOW_PRIV \
  $KUBELET_POD_INFRA_CONTAINER \
  $KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target

# Node config shown for worker .40; adjust addresses per node.
$ vi /etc/kubernetes/kubelet
###
## kubernetes kubelet (minion) config
#
## The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=192.168.151.40"
#
## The port for the info server to serve on
KUBELET_PORT="--port=10250 --client-ca-file=/etc/kubernetes/ssl/ca.pem"
#
## You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=192.168.151.40"
#
## location of the api-server
#KUBELET_API_SERVER="--api-servers=https://192.168.151.41:6443"
#
## pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0"
#
## Add your own!
## (original had --cert-dir=/etc/kubernetes/ssl twice; deduplicated)
KUBELET_ARGS="--allow-privileged=true --cgroup-driver=systemd --cluster-dns=10.1.0.2 --cluster-domain=cluster.local --cert-dir=/etc/kubernetes/ssl --hairpin-mode promiscuous-bridge --serialize-image-pulls=false --kubeconfig=/etc/kubernetes/kubelet.kubeconfig --require-kubeconfig"

# gcr.io is unreachable from this network; pull the pause image from a
# mirror and retag it to the name the kubelet expects.
$ docker pull index.tenxcloud.com/google_containers/pause-amd64:3.0
$ docker tag index.tenxcloud.com/google_containers/pause-amd64:3.0 gcr.io/google_containers/pause-amd64:3.0
$ docker rmi index.tenxcloud.com/google_containers/pause-amd64:3.0

$ systemctl daemon-reload
$ systemctl enable kubelet
$ systemctl start kubelet
$ systemctl status kubelet
kube-proxy
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
|
$ vi /etc/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/local/bin/kube-proxy \
  $KUBE_LOGTOSTDERR \
  $KUBE_LOG_LEVEL \
  $KUBE_MASTER \
  $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

$ vi /etc/kubernetes/proxy
###
# kubernetes proxy config

# default config should be adequate

# Add your own!
KUBE_PROXY_ARGS="--bind-address=192.168.151.40 --hostname-override=192.168.151.40 --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig --cluster-cidr=10.1.0.0/16"

# Build the kube-proxy kubeconfig using the kube-proxy client cert.
# NOTE(review): KUBE_APISERVER must be set in the environment first.
# Set cluster parameters
$ kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER}
# Set client credentials
$ kubectl config set-credentials proxy \
  --client-certificate=/etc/kubernetes/ssl/kube-proxy.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/kube-proxy-key.pem
# Set the context
$ kubectl config set-context kube-proxy \
  --cluster=kubernetes \
  --user=proxy
# Make it the default context
$ kubectl config use-context kube-proxy

$ systemctl daemon-reload
$ systemctl enable kube-proxy
$ systemctl start kube-proxy
$ systemctl status kube-proxy
集群测试
1
|
# Smoke test: schedule two nginx pods across the cluster.
$ kubectl run nginx --replicas=2 --labels="run=load-balancer-example" --image=nginx