I. Environment preparation
1. Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

2. Disable swap

swapoff -a
vi /etc/fstab
Remove (or comment out) the swap line: /dev/mapper/centos-swap swap                    swap    defaults        0 0
[root@k8s-master1 ~]# free -m
              total        used        free      shared  buff/cache  available
Mem:          1823        123        1468          8        231        1516
Swap:            0          0          0
-- Swap now shows 0, so swap is fully disabled
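The /etc/fstab edit can also be done non-interactively; a minimal sketch, assuming the swap entry is the only line containing the word "swap":

sed -ri 's/.*swap.*/#&/' /etc/fstab    # comment out the swap line instead of deleting it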

3. Set the hostnames (run the matching command on the corresponding node)
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2

4. Configure name resolution

vi /etc/hosts
192.168.142.115 k8s-master1
192.168.142.116 k8s-master2
192.168.142.118 k8s-node1
192.168.142.119 k8s-node2
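To avoid editing /etc/hosts by hand on every machine, the file can be pushed from k8s-master1 once SSH access between the nodes is set up (a sketch; add k8s-master2 to the list if that node is in use):

for h in k8s-node1 k8s-node2; do
    scp /etc/hosts root@$h:/etc/hosts
done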

5. Disable SELinux

setenforce 0
vi /etc/selinux/config 
SELINUX=disabled
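Note that setenforce 0 only lasts until the next reboot; the change in /etc/selinux/config makes it permanent. The edit can be scripted as well:

sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config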

6. Configure time synchronization
One node acts as the NTP server and all the others are clients:
k8s-master1 is the time server; the remaining nodes synchronize from it.
1) Configure k8s-master1

yum install chrony -y
vi /etc/chrony.conf
server 127.127.1.0 iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
allow 192.168.142.0/24
local stratum 10

[root@k8s-master1 etc]# systemctl start chronyd
[root@k8s-master1 etc]# systemctl enable chronyd
[root@k8s-master1 etc]# ss -unl | grep 123
-- no output: the new configuration is not active yet, so restart chronyd
[root@k8s-master1 etc]# systemctl restart chronyd
[root@k8s-master1 etc]# ss -unl | grep 123
UNCONN     0      0            *:123
-- chronyd is now serving NTP clients on UDP port 123

2) Configure k8s-node1 and the other client nodes

yum install chrony -y
vi /etc/chrony.conf
server 192.168.142.115 iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst

[root@k8s-node1 etc]# systemctl start chronyd
[root@k8s-node1 etc]# systemctl enable chronyd
[root@k8s-node1 etc]# systemctl restart chronyd
  

Check that the client is synchronizing with the master:
chronyc sources
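In the chronyc sources output, a line beginning with ^* in front of 192.168.142.115 means the client has selected the master as its synchronization source; chronyc tracking shows the current reference, stratum and offset in more detail:

chronyc tracking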

II. Install etcd
1. Issue certificates for etcd
1) Create a certificate authority (CA)
2) Fill in the certificate signing request, listing the IPs of the nodes where etcd will run
3) Ask the CA to sign and issue the certificate

Step 1: upload the TLS package to /root (omitted here).
Step 2: generate the CA and the etcd server certificate:
# tar xvf /root/TLS.tar.gz
# cd /root/TLS
# ./cfssl.sh
# cd etcd
# vim server-csr.json
Change the IP addresses under "hosts" to the IPs of the nodes where etcd runs:
{
    "CN": "etcd",
    "hosts": [
        "192.168.142.115",
        "192.168.142.118",
        "192.168.142.119"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "BeiJing",
            "ST": "BeiJing"
        }
    ]
}
# ./generate_etcd_cert.sh
# ls *pem
ca-key.pem  ca.pem  server-key.pem  server.pem

ca.pem: the CA certificate (public key)
ca-key.pem: the CA private key
Anyone requesting a certificate needs ca.pem.
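generate_etcd_cert.sh wraps cfssl; the CA creation and signing steps it performs typically look like the following sketch (the exact profile and file names come from ca-config.json and ca-csr.json inside the TLS package):

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server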

2. Install etcd
etcd is installed on master1, node1 and node2.
Extracting the etcd package produces a file, etcd.service, and a directory, etcd.

etcd.service is the systemd unit file for the service.
On CentOS 7, systemd unit files live in /usr/lib/systemd/system.

-- Put etcd under systemd management
cp etcd.service /usr/lib/systemd/system/

-- Edit the etcd configuration file
cp -r etcd /opt/
vi /opt/etcd/cfg/etcd.conf
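The exact keys in etcd.conf depend on the etcd.service file shipped in the package; with the usual ETCD_* environment-variable layout the file on k8s-master1 would look roughly like this (node1 and node2 use their own IPs and the names etcd-2 / etcd-3):

#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.142.115:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.142.115:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.142.115:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.142.115:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.142.115:2380,etcd-2=https://192.168.142.118:2380,etcd-3=https://192.168.142.119:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"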
-- Install the certificates generated earlier
cd /root/TLS/etcd/
\cp ca.pem server.pem server-key.pem /opt/etcd/ssl/

-- Copy the etcd unit file and program directory to node1 and node2
# scp /usr/lib/systemd/system/etcd.service root@k8s-node1:/usr/lib/systemd/system/
# scp /usr/lib/systemd/system/etcd.service root@k8s-node2:/usr/lib/systemd/system/
# scp -r /opt/etcd/ root@k8s-node1:/opt/
# scp -r /opt/etcd/ root@k8s-node2:/opt/

-- On node1 and node2, adjust the etcd configuration file (typically the node name and the listen/advertise IPs must be changed to the local node's values)
# vi /opt/etcd/cfg/etcd.conf
-- Start the etcd service on all three nodes (the first start may block until a second member joins the cluster)
# systemctl start etcd
# systemctl enable etcd
-- Check that the cluster is healthy
# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379" cluster-health
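The flags above use the etcdctl v2 API. If the bundled etcdctl is invoked with the v3 API instead, a roughly equivalent health check (a sketch, not taken from the package) would be:

# ETCDCTL_API=3 /opt/etcd/bin/etcdctl --cacert=/opt/etcd/ssl/ca.pem --cert=/opt/etcd/ssl/server.pem --key=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379" endpoint health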

3. Install the master components
(1) Issue the certificates for kube-apiserver

# cd /root/TLS/k8s/
# ./generate_k8s_cert.sh

(2) Install the binaries, unit files and certificates
# tar xvf k8s-master.tar.gz
# mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system/
# mv kubernetes /opt/
# cp /root/TLS/k8s/{ca*pem,server.pem,server-key.pem} /opt/kubernetes/ssl/ -rvf

Edit the kube-apiserver configuration:
# vi /opt/kubernetes/cfg/kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \   -- do not log to stderr (write log files instead)
--v=2 \   -- log level; higher values are more verbose
--log-dir=/opt/kubernetes/logs \   -- log directory
--etcd-servers=https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379 \
--bind-address=192.168.142.115 \   -- IP address the secure port listens on; the interface must be reachable by the rest of the cluster and by CLI/web clients
--secure-port=6443 \   -- secure (HTTPS) listening port
--advertise-address=192.168.142.115 \   -- address advertised to the other members of the cluster
--allow-privileged=true \   -- allow privileged containers to run
--service-cluster-ip-range=10.0.0.0/24 \   -- CIDR range used for Service cluster IPs
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \   -- admission plugins enabled in addition to the defaults
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \   -- enable bootstrap-token authentication for automatic certificate issuance
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
-- client certificate and key used when the apiserver connects to kubelets

--tls-cert-file=/opt/kubernetes/ssl/server.pem  \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
-- certificates used for access to the apiserver itself

--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
-- certificates used to access etcd

--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
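--token-auth-file points at a static token file with one record per line in the form token,user,uid,"group1,group2". A hypothetical entry for the kubelet-bootstrap user used later in this guide could look like this (the token value is made up; generate a random one, e.g. with head -c 16 /dev/urandom | od -An -t x | tr -d ' '):

c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"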
Edit the kube-controller-manager configuration:
# vi /opt/kubernetes/cfg/kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \   -- enable leader election; when several controller-manager instances run, one is elected leader
--master=127.0.0.1:8080 \   -- address of the kube-apiserver
--address=127.0.0.1 \   -- address the controller-manager listens on
--allocate-node-cidrs=true \   -- let the controller-manager allocate pod CIDRs to nodes
--cluster-cidr=10.244.0.0/16 \   -- pod CIDR range of the cluster; requires --allocate-node-cidrs=true
--service-cluster-ip-range=10.0.0.0/24 \   -- must match the value in kube-apiserver.conf
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"   -- validity period of signed certificates; keep it consistent with the ca-config.json used when generating the certificates
Edit the kube-scheduler configuration:
# vi /opt/kubernetes/cfg/kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect \   -- enable leader election
--master=127.0.0.1:8080 \   -- address of the kube-apiserver
--address=127.0.0.1"   -- address the kube-scheduler listens on

Start the master services:

# systemctl start kube-apiserver
# systemctl enable kube-apiserver

# systemctl start kube-scheduler
# systemctl enable kube-scheduler

# systemctl start kube-controller-manager
# systemctl enable kube-controller-manager

# cp /opt/kubernetes/bin/kubectl /bin/

Verify that the three processes are running:
# ps aux | grep kube
root       1617  4.3 18.9 549412 353504 ?       Ssl  15:49   0:10 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.142.115:2379,https://192.168.142.118:2379,https://192.168.142.119:2379 --bind-address=192.168.142.115 --secure-port=6443 --advertise-address=192.168.142.115 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root       1681  0.9  3.2 221368 60796 ?        Ssl  15:49   0:02 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
root       1707  4.1  1.4 146820 26556 ?        Ssl  15:52   0:01 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1

Check the startup logs:

[root@k8s-master1 cfg]# tail -f /opt/kubernetes/logs/kube-apiserver.INFO 
[+]poststarthook/kube-apiserver-autoregistration ok
[+]autoregister-completion ok
[+]poststarthook/apiservice-openapi-controller ok
healthz check failed
I0410 15:49:20.149045    1617 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:cloud-provider in kube-system
I0410 15:49:20.189575    1617 storage_rbac.go:308] created rolebinding.rbac.authorization.k8s.io/system:controller:token-cleaner in kube-system
W0410 15:49:20.260004    1617 lease.go:222] Resetting endpoints for master service "kubernetes" to [192.168.142.115]
I0410 15:49:20.260783    1617 controller.go:606] quota admission added evaluator for: endpoints
I0410 15:49:41.909126    1617 cacher.go:771] cacher (*rbac.ClusterRole): 1 objects queued in incoming channel.
I0410 15:49:42.398822    1617 controller.go:606] quota admission added evaluator for: serviceaccounts

[root@k8s-master1 cfg]#  tail -f /opt/kubernetes/logs/kube-scheduler.INFO
I0410 15:52:50.521734    1707 defaults.go:91] TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory
I0410 15:52:50.521743    1707 server.go:162] Starting Kubernetes Scheduler version v1.16.0
I0410 15:52:50.521928    1707 factory.go:294] Creating scheduler from algorithm provider 'DefaultProvider'
I0410 15:52:50.521937    1707 factory.go:382] Creating scheduler with fit predicates 'map[CheckNodeUnschedulable:{} CheckVolumeBinding:{} GeneralPredicates:{} MatchInterPodAffinity:{} MaxAzureDiskVolumeCount:{} MaxCSIVolumeCountPred:{} MaxEBSVolumeCount:{} MaxGCEPDVolumeCount:{} NoDiskConflict:{} NoVolumeZoneConflict:{} PodToleratesNodeTaints:{}]' and priority functions 'map[BalancedResourceAllocation:{} ImageLocalityPriority:{} InterPodAffinityPriority:{} LeastRequestedPriority:{} NodeAffinityPriority:{} NodePreferAvoidPodsPriority:{} SelectorSpreadPriority:{} TaintTolerationPriority:{}]'
W0410 15:52:50.523427    1707 authorization.go:47] Authorization is disabled
W0410 15:52:50.523434    1707 authentication.go:79] Authentication is disabled
I0410 15:52:50.523446    1707 deprecated_insecure_serving.go:51] Serving healthz insecurely on 127.0.0.1:10251
I0410 15:52:50.524314    1707 secure_serving.go:123] Serving securely on [::]:10259
I0410 15:52:51.530570    1707 leaderelection.go:241] attempting to acquire leader lease  kube-system/kube-scheduler...
I0410 15:53:09.113793    1707 leaderelection.go:251] successfully acquired lease kube-system/kube-scheduler


[root@k8s-master1 cfg]#  tail -f /opt/kubernetes/logs/kube-controller-manager.INFO 
Resource=csidrivers storage.k8s.io/v1beta1, Resource=csinodes], removed: []
I0410 15:49:43.298376    1681 shared_informer.go:197] Waiting for caches to sync for garbage collector
I0410 15:49:43.398504    1681 shared_informer.go:204] Caches are synced for garbage collector 
I0410 15:49:43.398517    1681 garbagecollector.go:242] synced garbage collector

The kubectl management tool

[root@k8s-master1 cfg]# kubectl get cs                      
NAME                 AGE
scheduler            <unknown>
controller-manager   <unknown>
etcd-2               <unknown>
etcd-1               <unknown>
etcd-0               <unknown>
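Note: the missing STATUS column above is a known display quirk of kubectl v1.16; the componentstatus health information is still there and can be inspected, for example, with:

kubectl get cs -o yaml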

Configure TLS bootstrapping for automatic certificate issuance

# kubectl create clusterrolebinding kubelet-bootstrap \
--clusterrole=system:node-bootstrapper \
--user=kubelet-bootstrap

This corresponds to the following settings in /opt/kubernetes/cfg/kube-apiserver.conf:
--enable-bootstrap-token-auth=true
--token-auth-file=/opt/kubernetes/cfg/token.csv
Together they allow kubelet client certificates to be issued automatically.
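Once the kubelets on the worker nodes are later started with this bootstrap token, their certificate signing requests show up on the master and can be listed and approved (node-csr-XXXX is a placeholder for the real CSR name):

kubectl get csr
kubectl certificate approve node-csr-XXXX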

===============
Encryption
(1) Symmetric encryption: the same key is used for encryption and decryption.
(2) Asymmetric encryption: a key pair is used; the sender encrypts with the public key and only the private key can decrypt.
(3) One-way hashing: data can only be hashed, never decrypted, e.g. MD5.
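For illustration, all three kinds can be tried with openssl and coreutils (plain.txt stands for any hypothetical input file; the RSA example only works on inputs smaller than the key size):

# symmetric: the same passphrase encrypts and decrypts
openssl enc -aes-256-cbc -k secret -in plain.txt -out cipher.bin
openssl enc -d -aes-256-cbc -k secret -in cipher.bin

# asymmetric: encrypt with the public key, decrypt with the private key
openssl genrsa -out key.pem 2048
openssl rsa -in key.pem -pubout -out pub.pem
openssl pkeyutl -encrypt -pubin -inkey pub.pem -in plain.txt -out cipher.rsa
openssl pkeyutl -decrypt -inkey key.pem -in cipher.rsa

# one-way: a hash can be computed but not reversed
md5sum plain.txt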

SSL:
(1) Certificate sources
Buy a certificate from a trusted third-party CA, or
issue one to yourself: a self-signed certificate.
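A self-signed certificate can be produced with a single openssl command, for example (file names and the CN are arbitrary placeholders):

openssl req -x509 -newkey rsa:2048 -nodes -days 365 -keyout selfsigned.key -out selfsigned.crt -subj "/CN=example.local"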

PKI (Public Key Infrastructure)
