k8s-ansible搭建的单master集群
学习笔记
·
kubernetes – 01
kubernetes 安装
proxy/registry仓库初始化
registry 192.168.1.100 1CPU,1G内存
安装仓库服务,初始化仓库
# --- registry host (192.168.1.100) bootstrap: yum mirrors, base hardening, docker + local registry ---
[root@registry ~]# rm -rf /etc/yum.repos.d/*.repo
# replace the default repos with the Huawei Cloud mirror and the Aliyun docker-ce repo
curl -o /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo
curl -o /etc/yum.repos.d/docker.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache
# stop and remove services that are useless on a cluster host (mail, at, tuned, firewall)
systemctl stop postfix atd tuned
yum remove -y postfix at audit tuned kexec-tools firewalld-*
# disable SELinux for this boot and permanently across reboots
setenforce 0
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
yum install epel-release -y && yum -y install ansible
# kubelet requires swap to be off; comment the swap entry out of fstab so it stays off
swapoff -a
sed -i '/swap/s/^/#/' /etc/fstab
# docker private registry; Harbor is recommended for production — this setup only saves host resources
yum list --show-duplicates docker-ce # list every available docker-ce version
yum install -y docker-ce
systemctl enable --now docker.service
mkdir -p /etc/docker
# quoted 'EOF' keeps the JSON literal (no shell expansion inside the here-doc)
cat >/etc/docker/daemon.json <<'EOF'
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://hub-mirror.c.163.com"],
"insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
EOF
systemctl restart docker.service
# docker-distribution provides a plain v2 registry listening on :5000
yum install -y docker-distribution
systemctl enable --now docker-distribution
# smoke-test the registry: push busybox and list its tags
docker pull busybox
docker tag busybox:latest 192.168.1.100:5000/busybox:latest
docker push 192.168.1.100:5000/busybox:latest
curl http://192.168.1.100:5000/v2/busybox/tags/list
{"name":"busybox","tags":["latest"]} # registry works as expected
kube-master安装
master 192.168.1.11 2CPU,2G内存
node-0001… 192.168.1.21… 2CPU,2G内存
1、防火墙相关配置
setenforce 0  # disable SELinux for the current boot
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config  # ...and permanently
yum install epel-release -y && yum -y install ansible
swapoff -a  # kubelet refuses to run with swap enabled
2、配置yum
# Write the kubernetes yum repo definition.
# NOTE: the here-doc delimiter MUST be quoted ('EOF'). With an unquoted EOF the
# shell expands $basearch while writing the file — the variable is normally unset,
# so the baseurl would end in "kubernetes-el7-" and be broken. Quoting keeps
# "$basearch" literal in the file so yum itself expands it at install time.
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://repo.huaweicloud.com/kubernetes/yum/repos/kubernetes-el7-$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://repo.huaweicloud.com/kubernetes/yum/doc/yum-key.gpg https://repo.huaweicloud.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#验证失败可以考虑先注释掉检测
3、安装软件包(master)
安装kubeadm、kubectl、kubelet、docker-ce
[root@master ~]# yum makecache
[root@master ~]# yum -y install kubelet-1.17.6 kubeadm-1.17.6 kubectl-1.17.6 kubernetes-cni-0.7.5 cri-tools-1.13.0 docker-ce --nogpgcheck
#如果注释掉验证,安装 yum install -y kubeadm kubelet kubectl docker-ce --nogpgcheck
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# vim /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://hub-mirror.c.163.com"],
"insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
[root@master ~]# systemctl enable --now docker kubelet
[root@master ~]# docker info |grep Cgroup
Cgroup Driver: systemd
4、镜像导入私有仓库
# 把云盘 kubernetes/v1.17.6/base-images 中的镜像拷贝到 master
[root@master ~]# cd base-images/
[root@master base-image]# for i in *.tar.gz;do docker load -i ${i};done
[root@master ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-proxy v1.17.6 92f9a31ce92a 2 years ago 117MB
k8s.gcr.io/kube-apiserver v1.17.6 c5c678ed2546 2 years ago 171MB
k8s.gcr.io/kube-controller-manager v1.17.6 abded685bb39 2 years ago 161MB
k8s.gcr.io/kube-scheduler v1.17.6 63f08589ff5e 2 years ago 94.4MB
k8s.gcr.io/coredns 1.6.5 70f311871ae1 2 years ago 41.6MB
k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 2 years ago 288MB
k8s.gcr.io/pause 3.1 da86e6ba6ca1 4 years ago 742kB
# Retag every locally loaded image for the private registry (stripping the
# original registry prefix, e.g. "k8s.gcr.io/"), push it, then drop the local tag.
# `read -r` prevents backslash mangling; all expansions are quoted (SC2086).
[root@master base-image]# docker images | awk '$2!="TAG"{print $1,$2}' | while read -r _f _v; do
docker tag "${_f}:${_v}" "192.168.1.100:5000/${_f##*/}:${_v}";
docker push "192.168.1.100:5000/${_f##*/}:${_v}";
docker rmi "${_f}:${_v}";
done
[root@master ~]# curl http://192.168.1.100:5000/v2/_catalog # 查看验证
{"repositories":["busybox","coredns","etcd","kube-apiserver","kube-controller-manager","kube-proxy","kube-scheduler","pause"]}
5、Tab键设置
[root@master ~]# kubectl completion bash >/etc/bash_completion.d/kubectl
[root@master ~]# kubeadm completion bash >/etc/bash_completion.d/kubeadm
[root@master ~]# exit
6、安装IPVS代理软件包
[root@master ~]# yum install -y ipvsadm ipset
7、配置master主机环境
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.11 master
192.168.1.21 node-0001
192.168.1.22 node-0002
192.168.1.23 node-0003
192.168.1.100 registry
[root@master ~]# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl --system
...
* Applying /etc/sysctl.d/k8s.conf ...
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
8、使用kubeadm部署
[root@master ~]# kubeadm init --dry-run #模拟安装,一般只看前十行
[root@master ~]# kubeadm config print init-defaults > kubeadm-init.yaml
[root@master ~]# vim kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s # token lifetime
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.1.11 # apiserver advertise IP address
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: master
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: 192.168.1.100:5000 # private image registry address
kind: ClusterConfiguration
kubernetesVersion: v1.17.6 # k8s version being installed
networking:
dnsDomain: cluster.local # default cluster DNS domain
podSubnet: 10.244.0.0/16 # pod network CIDR, added manually (must match flannel's "Network")
serviceSubnet: 10.254.0.0/16 # service CIDR
scheduler: {}
--- # the following 4 lines are appended manually at the end of the file
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs # enable IPVS (LVS) proxy mode
[root@master init]# kubeadm init --config=kubeadm-init.yaml |tee master-init.log
# 根据提示执行命令
[root@master init]# mkdir -p $HOME/.kube
[root@master init]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master init]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
9、验证安装结果
[root@master ~]# kubectl version
[root@master ~]# kubectl get componentstatuses
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health":"true"}
计算节点安装
1、获取token
# 创建一个永久的token
[root@master ~]# kubeadm token create --ttl=0 --print-join-command #生成新的加入命令
W1003 00:03:58.745216 6234 validation.go:28] Cannot validate kube-proxy config - no validator is available
W1003 00:03:58.745562 6234 validation.go:28] Cannot validate kubelet config - no validator is available
kubeadm join 192.168.1.11:6443 --token njnadt.lf8l3hnvr7a6he1g --discovery-token-ca-cert-hash sha256:fe7860c7ea9546f25003428b1c473271f668f5c5489866e63997c27ba83a0cb6
[root@master ~]# kubeadm token list # 获取token_hash
[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt |openssl rsa -pubin -outform der |openssl dgst -sha256 -hex #查看hash
2、使用ansible安装node
拷贝云盘上 kubernetes/v1.17.6/node-install 到跳板机
[root@registry ~]# ssh-keygen
[root@registry ~]# ssh-copy-id master
[root@registry ~]# ssh-copy-id node-0001 #以后新加入的节点都要先免密
[root@registry ~]# mkdir node-join;cd node-join
[root@registry node-join]# ls
ansible.cfg files hostlist.yaml node_install.yaml
[root@registry node-join]# cat ansible.cfg
[defaults]
inventory = hostlist.yaml
host_key_checking = False
[root@registry node-join]# cat hostlist.yaml
all:
children:
nodes:
hosts:
192.168.1.21: {}
# 192.168.1.22: {}
# 192.168.1.23: {}
ungrouped: {}
[root@registry node-join]# cat node_install.yaml
---
# Playbook: prepare a CentOS 7 host as a k8s worker and join it to the cluster.
# Fixes vs original: play had an empty name; indentation (lost in the paste) is
# restored to valid YAML; `when: result.stat.exists == False` replaced with the
# idiomatic `when: not result.stat.exists`.
- name: install kubernetes worker node and join the cluster
  hosts:
    - nodes
  vars:
    # update the three variables below before running the playbook;
    # the values come from `kubeadm token create --print-join-command` on the master
    master: '192.168.1.11:6443'
    token: 'njnadt.lf8l3hnvr7a6he1g'
    token_hash: 'sha256:fe7860c7ea9546f25003428b1c473271f668f5c5489866e63997c27ba83a0cb6'
  tasks:
    - name: disable swap
      lineinfile:
        path: /etc/fstab
        regexp: 'swap'
        state: absent
      notify: disable swap
    - name: swapoff
      shell: 'swapoff -a'
    - name: Ensure SELinux is set to disabled mode
      lineinfile:
        path: /etc/selinux/config
        regexp: '^SELINUX='
        line: SELINUX=disabled
      notify: disable selinux
    - name: remove the firewalld
      yum:
        name:
          - firewalld
          - firewalld-filesystem
        state: absent
    - name: install k8s node tools
      # requires a configured kubernetes yum repo; manual equivalent:
      # yum -y install kubelet-1.17.6 kubeadm-1.17.6 kubectl-1.17.6 kubernetes-cni-0.7.5 cri-tools-1.13.0 docker-ce --nogpgcheck
      yum:
        name:
          - kubeadm-1.17.6
          - kubelet-1.17.6
          - docker-ce
          - ipvsadm
          - ipset
        state: present
        update_cache: yes
    - name: Create a directory if it does not exist
      file:
        path: /etc/docker
        state: directory
        mode: '0755'
    - name: Copy file with /etc/hosts
      copy:
        src: files/hosts
        dest: /etc/hosts
        owner: root
        group: root
        mode: '0644'
    - name: Copy file with /etc/docker/daemon.json
      copy:
        src: files/daemon.json
        dest: /etc/docker/daemon.json
        owner: root
        group: root
        mode: '0644'
    - name: Copy file with /etc/sysctl.d/k8s.conf
      copy:
        src: files/k8s.conf
        dest: /etc/sysctl.d/k8s.conf
        owner: root
        group: root
        mode: '0644'
      notify: enable sysctl args
    - name: enable k8s node service
      service:
        name: "{{ item }}"
        state: started
        enabled: yes
      with_items:
        - docker
        - kubelet
    - name: check node state
      # a host that already joined has /etc/kubernetes/kubelet.conf; used to skip the join
      stat:
        path: /etc/kubernetes/kubelet.conf
      register: result
    - name: node join
      shell: kubeadm join '{{ master }}' --token '{{ token }}' --discovery-token-ca-cert-hash '{{ token_hash }}'
      when: not result.stat.exists
  handlers:
    - name: disable swap
      shell: swapoff -a
    - name: disable selinux
      shell: setenforce 0
    - name: enable sysctl args
      shell: sysctl --system
[root@registry node-join]# cat files/hosts
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.1.11 master
192.168.1.21 node-0001
192.168.1.22 node-0002
...
192.168.1.100 registry
[root@registry node-join]# cat files/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://hub-mirror.c.163.com"],
"insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
[root@registry node-join]# cat files/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@registry node-join]# ansible-playbook node_install.yaml
3、验证安装
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady master 38m v1.17.6
node-0001 NotReady <none> 20s v1.17.6
网络插件安装配置
拷贝云盘 kubernetes/v1.17.6/flannel 目录到 master 上
1、上传镜像到私有仓库
[root@master ~]# cd flannel
[root@master flannel]# docker load -i flannel.tar.gz
[root@master flannel]# docker tag quay.io/coreos/flannel:v0.12.0-amd64 192.168.1.100:5000/flannel:v0.12.0-amd64
[root@master flannel]# docker push 192.168.1.100:5000/flannel:v0.12.0-amd64
2、修改配置文件并安装
[root@master flannel]# vim kube-flannel.yml #kube-flannel.yml放文章最后
128: "Network": "10.244.0.0/16", # 这个跟kubeadm-init.yaml里的podSubnet一致
172: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
186: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
190: - --iface=ens33 #网卡名默认eth0,不是的话,args下新增的一行
227-结尾: 删除
[root@master flannel]# kubectl apply -f kube-flannel.yml
3、验证结果
[root@master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready master 56m v1.17.6
node-0001 Ready <none> 18m v1.17.6
[root@master ~]# kubectl get pod -nkube-system -w
NAME READY STATUS RESTARTS AGE
coredns-f6bfd8d46-k8hnj 1/1 Running 0 56m
coredns-f6bfd8d46-rftwb 1/1 Running 0 56m
etcd-master 1/1 Running 0 56m
kube-apiserver-master 1/1 Running 0 56m
kube-controller-manager-master 1/1 Running 0 56m
kube-flannel-ds-amd64-blz7v 1/1 Running 0 80s
kube-flannel-ds-amd64-ztcbk 1/1 Running 0 80s
kube-proxy-g2c79 1/1 Running 2 19m
kube-proxy-jpdvg 1/1 Running 1 56m
kube-scheduler-master 1/1 Running 0 56m
[root@master ~]# docker pull nginx
[root@master ~]# docker tag nginx:latest 192.168.1.100:5000/nginx:latest
[root@master ~]# docker push 192.168.1.100:5000/nginx:latest
[root@master ~]# kubectl apply -f nginx.yml
deployment.apps/nginx-example created
service/nginx-service configured
[root@master ~]# kubectl get pod -w
NAME READY STATUS RESTARTS AGE
nginx-example-689875b956-drmk4 1/1 Running 0 6s
^C[root@master ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.254.0.1 <none> 443/TCP 67m
nginx-service NodePort 10.254.161.216 <none> 80:30476/TCP 61s
浏览器访问192.168.1.21:30476,看到nginx页面
nginx.yml
---
# Deployment: one nginx replica pulled from the private registry
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-example
spec:
selector:
matchLabels:
app: my-nginx
replicas: 1
template:
metadata:
labels:
app: my-nginx
spec:
containers:
- name: nginx
image: 192.168.1.100:5000/nginx
ports:
- protocol: TCP
containerPort: 80
restartPolicy: Always
---
# Service: NodePort exposing the deployment's port 80 on every node
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
ports:
- protocol: TCP
port: 80
targetPort: 80
selector:
app: my-nginx
type: NodePort
kube-flannel.yml
[root@master ~]# cat kube-flannel.yml
---
# Upstream flannel v0.12.0 manifest with three local customizations:
# the Pod network CIDR, images retagged into the private registry, and --iface.
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
# cni-conf.json: CNI plugin chain (flannel + portmap) installed into /etc/cni/net.d
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
# net-conf.json: "Network" must match podSubnet in kubeadm-init.yaml (10.244.0.0/16)
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds-amd64
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: kubernetes.io/arch
operator: In
values:
- amd64
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
# customized: image retagged into the private registry (was quay.io/coreos/flannel)
image: 192.168.1.100:5000/flannel:v0.12.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
# customized: image retagged into the private registry (was quay.io/coreos/flannel)
image: 192.168.1.100:5000/flannel:v0.12.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
# customized: host NIC name; flannel's default is eth0
- --iface=ens33
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
更多推荐
已为社区贡献24条内容
所有评论(0)