Table of Contents

I. Initialization

1. Hostname setup (all nodes)

2. Passwordless SSH login (steps omitted)

3. System configuration (all nodes)

4. Certificate initialization (run on k8s-node1 only)

II. Installation

1. Etcd installation and configuration (run on the master node)

2. Flannel installation and configuration

3. Docker installation and configuration

4. Kubernetes installation and configuration


The community now offers many tools for automated Kubernetes deployment, such as kubespray and kubeadm.

We have so far deployed our clusters by hand; these notes record that process. The component versions used in this installation are:

Component     Version
etcd          3.3.11
kubernetes    1.13.4
docker-ce     18.06.1.ce
flannel       0.11.0

I. Initialization

1. Hostname setup (all nodes)

This deployment uses three nodes; k8s-node1 acts as both master and node.

k8s-node1    172.16.9.201    master,node
k8s-node2    172.16.9.202    node
k8s-node3    172.16.9.203    node

Add the node entries to /etc/hosts:

172.16.9.201 k8s-node1
172.16.9.202 k8s-node2
172.16.9.203 k8s-node3
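The hostnames themselves can be set with hostnamectl; a minimal sketch, assuming CentOS 7 with systemd (run the matching command on each node):

# run on the corresponding node
hostnamectl set-hostname k8s-node1    # on 172.16.9.201
hostnamectl set-hostname k8s-node2    # on 172.16.9.202
hostnamectl set-hostname k8s-node3    # on 172.16.9.203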

2. Passwordless SSH login (steps omitted)
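For reference, a minimal sketch of the omitted step, assuming key-based root login from k8s-node1 to every node is sufficient:

# on k8s-node1: generate a key pair and push the public key to each node
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
for node in k8s-node1 k8s-node2 k8s-node3; do
    ssh-copy-id root@${node}
done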

3. System configuration (all nodes)

Kernel parameters for Kubernetes:

cat > /etc/sysctl.d/kubernetes.conf <<-EOF
net.ipv4.ip_forward = 1
net.ipv4.conf.all.route_localnet = 1
# prevent ARP cache overflow in a large cluster
net.ipv4.neigh.default.gc_thresh1 = 70000
net.ipv4.neigh.default.gc_thresh2 = 80000
net.ipv4.neigh.default.gc_thresh3 = 90000
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.file-max = 65535
# es requires vm.max_map_count to be at least 262144.
vm.max_map_count = 262144
# kubelet requires swap off.
# https://github.com/kubernetes/kubernetes/issues/53533
vm.swappiness = 0
EOF

sysctl -p /etc/sysctl.d/kubernetes.conf

Nginx parameters (we use nginx as a reverse proxy):

cat > /etc/sysctl.d/nginx.conf <<-EOF
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 262144
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_max_tw_buckets = 5000
net.ipv4.tcp_max_orphans = 262144
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syn_retries = 1
EOF

sysctl -p /etc/sysctl.d/nginx.conf

System settings:

swapoff -a
sed -i -r 's|^\S+\s+swap\s+swap.*|# &|' /etc/fstab

# raise the maximum number of files that a process can open,
# to avoid the nginx process of 'nginx-ingress-controller'
# failing to set 'worker_rlimit_nofile' to '94520' in 0.12.0+
sed -i -r '/^\* (soft|hard) nofile/d' /etc/security/limits.conf
echo "* soft nofile 100000" >> /etc/security/limits.conf
echo "* hard nofile 200000" >> /etc/security/limits.conf

systemctl disable firewalld.service
systemctl stop firewalld.service

# clean up existing iptables rules.
iptables -F && iptables -F -t nat
iptables -X && iptables -X -t nat

sed -i -r 's|^(SELINUX=).*|\1disabled|' /etc/selinux/config
setenforce 0

4. Certificate initialization (run on k8s-node1 only)

Download the cfssl tools:

curl "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64" -o "/usr/local/bin/cfssl"
curl "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64" -o "/usr/local/bin/cfssljson"
curl "https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64" -o "/usr/local/bin/cfssl-certinfo"

chmod +x "/usr/local/bin/cfssl"
chmod +x "/usr/local/bin/cfssljson"
chmod +x "/usr/local/bin/cfssl-certinfo"
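A quick sanity check that the downloaded binaries are usable:

cfssl version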

Initialize the CA (the 87600h expiry below can be adjusted as needed):

mkdir -p /etc/kubernetes/ssl
cd /etc/kubernetes/ssl

# Generate CA Certificates
cat > ca-config.json <<-EOF
{
    "signing": {
        "default": {
            "expiry": "87600h"
        },
        "profiles": {
            "kubernetes": {
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry": "87600h"
            }
        }
    }
}
EOF

cat > ca-csr.json <<-EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "WuHan",
            "L": "WuHan",
            "O": "kubernetes",
            "OU": "CA"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json |cfssljson -bare ca

Generate the service certificates

Certificates are needed for etcd, docker, kube-apiserver, kube-controller-manager, kube-scheduler, kube-proxy, and the admin client.

The helper function below generates each certificate:

# Generic helper to generate an SSL certificate. The hosts list should contain
# every IP and hostname allowed to access the service; define ALLOW_IPS beforehand
# with the addresses of all cluster nodes and any client machines.
function generate_ssl_certificates {
    if [[ "$#" -ne 3 ]]; then
        return 1
    fi

    local service_name="${1}"
    local common_name="${2}"
    local organization="${3}"
    local csr_file="${service_name}-csr.json"

    cd /etc/kubernetes/ssl

	cat > "${csr_file}" <<-EOF
	{
	    "CN": "${common_name}",
	    "key": {
	        "algo": "rsa",
	        "size": 2048
	    },
	    "hosts": [
	        "10.10.0.1",
	        "10.10.0.2",
	        "127.0.0.1",
	        "kubernetes",
	        "kubernetes.default",
	        "kubernetes.default.svc",
	        "$ALLOW_IPS"
	    ],
	    "names": [
	        {
	            "C": "CN",
	            "ST": "WuHan",
	            "L": "WuHan",
	            "O": "${organization}",
	            "OU": "kubernetes"
	        }
	    ]
	}
	EOF

    cfssl gencert \
          -ca=ca.pem \
          -ca-key=ca-key.pem \
          -config=ca-config.json \
          -profile=kubernetes \
          "${csr_file}" |cfssljson -bare "${service_name}"
}

# generate the certificate and private key for each service
generate_ssl_certificates etcd etcd etcd
generate_ssl_certificates docker docker docker
generate_ssl_certificates kube-apiserver system:kube-apiserver system:kube-apiserver
generate_ssl_certificates kube-controller-manager system:kube-controller-manager system:kube-controller-manager
generate_ssl_certificates kube-scheduler system:kube-scheduler system:kube-scheduler

# notes: kube-proxy is different from other kubernetes components.
generate_ssl_certificates kube-proxy system:kube-proxy system:node-proxier

# generate the admin client certificate and private key.
generate_ssl_certificates admin admin system:masters

# the kube-controller-manager leverages a key pair to generate and sign service
# account tokens, as described in the managing service accounts documentation.
generate_ssl_certificates service-account service-accounts kubernetes
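Before copying the files around, a hedged sanity check that every expected certificate/key pair was produced:

# each service should now have <name>.pem and <name>-key.pem under /etc/kubernetes/ssl
for name in etcd docker kube-apiserver kube-controller-manager kube-scheduler kube-proxy admin service-account; do
    ls -l /etc/kubernetes/ssl/${name}.pem /etc/kubernetes/ssl/${name}-key.pem
done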

Copy the generated certificates to the other cluster nodes:

ssh k8s-node2 "mkdir -p /etc/kubernetes/ssl" && scp /etc/kubernetes/ssl/* k8s-node2:/etc/kubernetes/ssl/
ssh k8s-node3 "mkdir -p /etc/kubernetes/ssl" && scp /etc/kubernetes/ssl/* k8s-node3:/etc/kubernetes/ssl/

Initialize the kubelet certificate (run on every node; set the host variable to the node's IP address before running):

cd /etc/kubernetes/ssl

cat > kubelet-$(hostname).json <<-EOF
{
    "CN": "system:node:$(hostname)",
    "hosts": [
        "$(hostname)",
        "${host}"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "WuHan",
            "L": "WuHan",
            "O": "system:nodes",
            "OU": "kubernetes"
        }
    ]
}
EOF

cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      kubelet-$(hostname).json |cfssljson -bare kubelet-$(hostname)
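To confirm the certificate carries the node identity (CN system:node:<hostname>), it can be inspected with the cfssl-certinfo tool downloaded earlier:

cfssl-certinfo -cert kubelet-$(hostname).pem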

II. Installation

1. Etcd installation and configuration (run on the master node)

Install etcd:

yum install -y -q "etcd-3.3.11"

Configure etcd:

# edit the /etc/etcd/etcd.conf file
vim /etc/etcd/etcd.conf

ETCD_NAME=etcd0
ETCD_DATA_DIR="/var/lib/etcd/etcd0"
ETCD_LISTEN_PEER_URLS="https://172.16.9.201:2380"
ETCD_LISTEN_CLIENT_URLS="https://172.16.9.201:2379,https://127.0.0.1:2379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://172.16.9.201:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://172.16.9.201:2379"
ETCD_INITIAL_CLUSTER="etcd0=https://172.16.9.201:2380"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_AUTO_COMPACTION_RETENTION="1"

# This deployment runs a single etcd node; for a three-node etcd cluster, set instead:
ETCD_INITIAL_CLUSTER="etcd0=https://172.16.9.201:2380,etcd1=https://172.16.9.202:2380,etcd2=https://172.16.9.203:2380"

# To enable HTTPS, also copy the certificates and add the following settings:
mkdir /etc/etcd/ssl
cp /etc/kubernetes/ssl/ca.pem /etc/etcd/ssl/
cp /etc/kubernetes/ssl/etcd.pem /etc/etcd/ssl/
cp /etc/kubernetes/ssl/etcd-key.pem /etc/etcd/ssl/

ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_CLIENT_CERT_AUTH="true"
ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
ETCD_PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/ca.pem"
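Starting the service is not shown above; a hedged sketch, assuming the CentOS etcd package runs the daemon as the etcd user (which therefore needs read access to the copied certificates):

chown -R etcd:etcd /etc/etcd/ssl /var/lib/etcd
systemctl enable etcd
systemctl start etcd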

Configure etcdctl aliases:

cat >> /root/.bashrc <<-EOF
alias etcdctlv2='ETCDCTL_API=2 etcdctl \
                               --endpoints=https://172.16.9.201:2379 \
                               --ca-file=/etc/etcd/ssl/ca.pem \
                               --cert-file=/etc/etcd/ssl/etcd.pem \
                               --key-file=/etc/etcd/ssl/etcd-key.pem'
alias etcdctlv3='ETCDCTL_API=3 etcdctl \
                               --endpoints=https://172.16.9.201:2379 \
                               --cacert=/etc/etcd/ssl/ca.pem \
                               --cert=/etc/etcd/ssl/etcd.pem \
                               --key=/etc/etcd/ssl/etcd-key.pem'
EOF

source ~/.bashrc

[root@k8s-node1 ~]# etcdctlv2 cluster-health
member cfc35c28cabf1d4e is healthy: got healthy result from https://172.16.9.201:2379
cluster is healthy

2. Flannel installation and configuration

Deploy from the binary release on the hosts (all nodes):

wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz

mkdir -p /tmp/flannel
tar -xzf flannel-v0.11.0-linux-amd64.tar.gz -C /tmp/flannel
cp /tmp/flannel/flanneld /usr/local/bin
cp /tmp/flannel/mk-docker-opts.sh /usr/local/bin

Write the flannel network configuration into etcd (master node):

flannel_config=$(cat <<-EOF | python
import json
conf = dict()
conf['Network'] = '172.17.0.0/16'
conf['SubnetLen'] = 24
conf['Backend'] = {'Type': 'vxlan'}
print(json.dumps(conf))
EOF
)

etcdctlv2 set /k8s.com/network/config "${flannel_config}"

etcdctlv2 get /k8s.com/network/config
{"Backend": {"Type": "vxlan"}, "Network": "172.17.0.0/16", "SubnetLen": 24}

Configure flanneld (all nodes):

cat > "/etc/systemd/system/flanneld.service" <<-EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
Before=docker.service

[Service]
Type=notify
ExecStart=/usr/local/bin/flanneld \
            -etcd-cafile=/etc/kubernetes/ssl/ca.pem \
            -etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
            -etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
            -etcd-endpoints=https://172.16.9.201:2379 \
            -etcd-prefix=/k8s.com/network \
            -iface=eth0 \
            -ip-masq

ExecStartPost=/usr/local/bin/mk-docker-opts.sh \
            -k DOCKER_NETWORK_OPTIONS \
            -d /run/flannel/docker
Restart=on-failure

[Install]
WantedBy=multi-user.target
WantedBy=docker.service
EOF

systemctl enable flanneld.service
systemctl start flanneld.service

# check the flannel.1 interface on each node
[root@k8s-node1 ~]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.3.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::3001:e8ff:fed6:a1f6  prefixlen 64  scopeid 0x20<link>
        ether 32:01:e8:d6:a1:f6  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 8 overruns 0  carrier 0  collisions 0

[root@k8s-node2 ~]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.50.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::b050:9ff:fe40:63ca  prefixlen 64  scopeid 0x20<link>
        ether b2:50:09:40:63:ca  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 10 overruns 0  carrier 0  collisions 0

[root@k8s-node3 ~]# ifconfig flannel.1
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
        inet 172.17.86.0  netmask 255.255.255.255  broadcast 0.0.0.0
        inet6 fe80::a0f6:a6ff:fe7d:bb15  prefixlen 64  scopeid 0x20<link>
        ether a2:f6:a6:7d:bb:15  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 10 overruns 0  carrier 0  collisions 0

# check the subnet records in etcd
[root@k8s-node1 ~]# etcdctlv2 ls /k8s.com/network/subnets
/k8s.com/network/subnets/172.17.86.0-24
/k8s.com/network/subnets/172.17.50.0-24
/k8s.com/network/subnets/172.17.3.0-24

[root@k8s-node1 ~]# etcdctlv2 get /k8s.com/network/subnets/172.17.86.0-24
{"PublicIP":"172.16.9.203","BackendType":"vxlan","BackendData":{"VtepMAC":"a2:f6:a6:7d:bb:15"}}

[root@k8s-node1 ~]# etcdctlv2 get /k8s.com/network/subnets/172.17.50.0-24
{"PublicIP":"172.16.9.202","BackendType":"vxlan","BackendData":{"VtepMAC":"b2:50:09:40:63:ca"}}

[root@k8s-node1 ~]# etcdctlv2 get /k8s.com/network/subnets/172.17.3.0-24
{"PublicIP":"172.16.9.201","BackendType":"vxlan","BackendData":{"VtepMAC":"32:01:e8:d6:a1:f6"}}
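As a simple, hedged connectivity check, ping the other nodes' flannel.1 addresses (taken from the ifconfig output above) from k8s-node1; replies indicate the VXLAN overlay is working:

ping -c 2 172.17.50.0    # flannel.1 on k8s-node2
ping -c 2 172.17.86.0    # flannel.1 on k8s-node3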

3. Docker installation and configuration

# configure the yum repository (alternatively, download the rpm packages directly)
cat > /etc/yum.repos.d/docker-ce.repo <<-EOF
[docker-ce-stable]
name=Docker CE Stable Mirror Repository
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/7/x86_64/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
EOF
 
yum install --enablerepo=docker-ce-stable -y docker-ce-18.06.1.ce
 
# systemd unit file; 172.16.9.201:30050 below is our Harbor registry, and the
# tcp --host address should be changed to each node's own IP
cat > "/etc/systemd/system/docker.service" <<-'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
EnvironmentFile=-/run/flannel/docker
ExecStart=/usr/bin/dockerd \
            $DOCKER_NETWORK_OPTIONS \
            --data-root=/var/lib/docker \
            --host=tcp://172.16.9.202:2375 \
            --host=unix:///var/run/docker.sock \
            --insecure-registry=172.16.9.201:30050 \
            --insecure-registry=k8s.gcr.io \
            --insecure-registry=quay.io \
            --ip-forward=true \
            --live-restore=true \
            --log-driver=json-file \
            --log-level=warn \
            --registry-mirror=https://registry.docker-cn.com \
            --selinux-enabled=false \
            --storage-driver=overlay2 \
            --tlscacert=/etc/kubernetes/ssl/ca.pem \
            --tlscert=/etc/kubernetes/ssl/docker.pem \
            --tlskey=/etc/kubernetes/ssl/docker-key.pem \
            --tlsverify

ExecReload=/bin/kill -s HUP $MAINPID
# need to reset the rule of iptables FORWARD chain to ACCEPT, because
# docker 1.13 changed the default iptables forwarding policy to DROP.
# https://github.com/moby/moby/pull/28257/files
# https://github.com/kubernetes/kubernetes/issues/40182
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
# TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target
EOF
 
systemctl enable docker
systemctl start docker

# check the docker0 interface on each node
[root@k8s-node1 ~]# ifconfig docker0
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.3.1  netmask 255.255.255.0  broadcast 172.17.3.255
        ether 02:42:7d:f2:e9:6d  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@k8s-node2 ~]# ifconfig docker0
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.50.1  netmask 255.255.255.0  broadcast 172.17.50.255
        ether 02:42:a0:33:76:a9  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@k8s-node3 ~]# ifconfig docker0
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 172.17.86.1  netmask 255.255.255.0  broadcast 172.17.86.255
        ether 02:42:ba:e3:f9:41  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
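The DOCKER_NETWORK_OPTIONS referenced in the unit file come from the file written by mk-docker-opts.sh; if docker0 does not pick up the flannel subnet, inspecting that file is a good first step:

cat /run/flannel/docker
# should define DOCKER_NETWORK_OPTIONS with --bip/--mtu matching the node's flannel subnet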

4. Kubernetes installation and configuration

Kubernetes is installed from the binary release. Download the package on one node and copy the binaries to the others:

wget https://dl.k8s.io/v1.13.4/kubernetes-server-linux-amd64.tar.gz

tar xvzf kubernetes-server-linux-amd64.tar.gz

# copy the following binaries to all master nodes
cp -f kubernetes/server/bin/kubectl /usr/local/bin/
cp -f kubernetes/server/bin/kube-apiserver /usr/local/bin/
cp -f kubernetes/server/bin/kube-controller-manager /usr/local/bin/
cp -f kubernetes/server/bin/kube-scheduler /usr/local/bin/

# copy the following binaries to all worker nodes
cp -f kubernetes/server/bin/kubectl /usr/local/bin/
cp -f kubernetes/server/bin/kubelet /usr/local/bin/
cp -f kubernetes/server/bin/kube-proxy /usr/local/bin/

Initialize the kubeconfig files (run on the master node):

# 1.Generating the data encryption config and key
encryption_key=$(head -c 32 /dev/urandom |base64)

cat > "/etc/kubernetes/encryption-config.yaml" <<EOF
apiVersion: v1
kind: EncryptionConfig
resources:
- resources:
  - secrets
  providers:
  - aescbc:
      keys:
      - name: key1
        secret: ${encryption_key}
  - identity: {}
EOF

# 2.Generating the kubeconfig file for k8s component
for component in kube-controller-manager kube-scheduler kube-proxy; do
	kubectl config set-cluster kubernetes \
			--embed-certs=true \
			--certificate-authority="/etc/kubernetes/ssl/ca.pem" \
			--server=https://172.16.9.201:5443 \
			--kubeconfig="/etc/kubernetes/${component}.kubeconfig"
	
	kubectl config set-credentials "system:${component}" \
			--embed-certs=true \
			--client-certificate=/etc/kubernetes/ssl/${component}.pem \
			--client-key=/etc/kubernetes/ssl/${component}-key.pem \
			--kubeconfig="/etc/kubernetes/${component}.kubeconfig"
	
	kubectl config set-context default \
			--cluster=kubernetes \
			--user="system:${component}" \
			--kubeconfig="/etc/kubernetes/${component}.kubeconfig"
	
	kubectl config use-context default \
			--kubeconfig="/etc/kubernetes/${component}.kubeconfig"
done

# 3.Generating the kubeconfig file for user admin
kubectl config set-cluster kubernetes \
        --embed-certs=true \
        --certificate-authority="/etc/kubernetes/ssl/ca.pem" \
        --server=https://172.16.9.201:5443 \
        --kubeconfig="/etc/kubernetes/admin.kubeconfig"

kubectl config set-credentials admin \
        --embed-certs=true \
        --client-certificate="/etc/kubernetes/ssl/admin.pem" \
        --client-key="/etc/kubernetes/ssl/admin-key.pem" \
        --kubeconfig="/etc/kubernetes/admin.kubeconfig"

kubectl config set-context default \
        --cluster=kubernetes \
        --user=admin \
        --kubeconfig="/etc/kubernetes/admin.kubeconfig"

kubectl config use-context default \
        --kubeconfig="/etc/kubernetes/admin.kubeconfig"

# 4. copy the config files to all masters and nodes
scp /etc/kubernetes/*.kubeconfig other_nodes:/etc/kubernetes/
scp /etc/kubernetes/encryption-config.yaml other_nodes:/etc/kubernetes/

Install and configure the master components

kube-apiserver

cat > "/etc/systemd/system/kube-apiserver.service" <<-EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
            --address=172.16.9.201 \
            --advertise-address=172.16.9.201 \
            --allow-privileged=true \
            --alsologtostderr=true \
            --apiserver-count=1 \
            --authorization-mode=Node,RBAC \
            --bind-address=172.16.9.201 \
            --client-ca-file=/etc/kubernetes/ssl/ca.pem \
            --enable-admission-plugins=Initializers,NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
            --enable-swagger-ui=true \
            --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
            --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
            --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
            --etcd-prefix=/kubernetes \
            --etcd-servers=https://172.16.9.201:2379 \
            --event-ttl=1h \
            --experimental-encryption-provider-config=/etc/kubernetes/encryption-config.yaml \
            --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.pem \
            --kubelet-client-certificate=/etc/kubernetes/ssl/kube-apiserver.pem \
            --kubelet-client-key=/etc/kubernetes/ssl/kube-apiserver-key.pem \
            --kubelet-https=true \
            --insecure-bind-address=172.16.9.201 \
            --insecure-port=7070 \
            --log-dir=/var/log/kubernetes \
            --log-flush-frequency=10s \
            --logtostderr=false \
            --runtime-config=api/all \
            --secure-port=5443 \
            --service-account-key-file=/etc/kubernetes/ssl/service-account.pem \
            --service-cluster-ip-range=10.10.0.0/16 \
            --service-node-port-range=30000-32767 \
            --tls-cert-file=/etc/kubernetes/ssl/kube-apiserver.pem \
            --tls-private-key-file=/etc/kubernetes/ssl/kube-apiserver-key.pem \
            --v=4
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

kube-controller-manager

cat > "/etc/systemd/system/kube-controller-manager.service" <<-EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-controller-manager \
            --address=127.0.0.1 \
            --allocate-node-cidrs=false \
            --alsologtostderr=true \
            --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
            --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
            --bind-address=127.0.0.1 \
            --cluster-cidr=172.17.0.0/16 \
            --cluster-name=kubernetes \
            --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
            --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
            --controller-start-interval=0 \
            --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
            --leader-elect=true \
            --leader-elect-lease-duration=15s \
            --leader-elect-renew-deadline=10s \
            --leader-elect-retry-period=2s \
            --log-dir=/var/log/kubernetes \
            --log-flush-frequency=10s \
            --logtostderr=false \
            --node-cidr-mask-size=16 \
            --node-monitor-grace-period=30s \
            --node-monitor-period=3s \
            --pod-eviction-timeout=30s \
            --port=10252 \
            --root-ca-file=/etc/kubernetes/ssl/ca.pem \
            --secure-port=10257 \
            --service-account-private-key-file=/etc/kubernetes/ssl/service-account-key.pem \
            --service-cluster-ip-range=10.10.0.0/16 \
            --tls-cert-file=/etc/kubernetes/ssl/kube-controller-manager.pem \
            --tls-private-key-file=/etc/kubernetes/ssl/kube-controller-manager-key.pem \
            --use-service-account-credentials=true \
            --v=4
Restart=on-failure
RestartSec=5
Type=simple
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

kube-scheduler

cat > "/etc/systemd/system/kube-scheduler.service" <<-EOF
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-scheduler \
            --address=127.0.0.1 \
            --alsologtostderr=true \
            --bind-address=127.0.0.1 \
            --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \
            --leader-elect=true \
            --leader-elect-lease-duration=15s \
            --leader-elect-renew-deadline=10s \
            --leader-elect-retry-period=2s \
            --log-dir=/var/log/kubernetes \
            --log-flush-frequency=10s \
            --logtostderr=false \
            --port=10251 \
            --secure-port=10259 \
            --tls-cert-file=/etc/kubernetes/ssl/kube-scheduler.pem \
            --tls-private-key-file=/etc/kubernetes/ssl/kube-scheduler-key.pem \
            --v=4
Restart=on-failure
RestartSec=5
Type=simple
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start the services:
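The unit files above log to /var/log/kubernetes (--log-dir), so create that directory first (a small preparation step not shown in the original notes):

mkdir -p /var/log/kubernetes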

for svc in kube-{apiserver,controller-manager,scheduler}.service; do
    systemctl enable ${svc}
    systemctl start ${svc}
done

export KUBECONFIG=/etc/kubernetes/admin.kubeconfig

[root@k8s-node1 ~]# kubectl get node
No resources found.
[root@k8s-node1 ~]# kubectl get cs
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}

Install and configure the node components

The unit files below use k8s-node1's IP (172.16.9.201) and certificate/kubeconfig names; on the other nodes, adjust --address, --bind-address, and the kubelet-<hostname> file names accordingly.

kube-proxy

cat > "/etc/systemd/system/kube-proxy.service" <<-EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-proxy \
            --alsologtostderr=true \
            --bind-address=172.16.9.201 \
            --cluster-cidr=172.17.0.0/16 \
            --hostname-override= \
            --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
            --log-dir=/var/log/kubernetes \
            --log-flush-frequency=5s \
            --logtostderr=false \
            --proxy-mode=iptables \
            --v=4
Restart=on-failure
RestartSec=5
Type=simple
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

kubelet
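The unit below references /etc/kubernetes/kubelet-k8s-node1.kubeconfig, whose creation is not shown above. A hedged sketch that follows the same pattern as the other components, run on each node with the kubelet certificate generated earlier:

node=$(hostname)
kubectl config set-cluster kubernetes \
        --embed-certs=true \
        --certificate-authority=/etc/kubernetes/ssl/ca.pem \
        --server=https://172.16.9.201:5443 \
        --kubeconfig=/etc/kubernetes/kubelet-${node}.kubeconfig

kubectl config set-credentials system:node:${node} \
        --embed-certs=true \
        --client-certificate=/etc/kubernetes/ssl/kubelet-${node}.pem \
        --client-key=/etc/kubernetes/ssl/kubelet-${node}-key.pem \
        --kubeconfig=/etc/kubernetes/kubelet-${node}.kubeconfig

kubectl config set-context default \
        --cluster=kubernetes \
        --user=system:node:${node} \
        --kubeconfig=/etc/kubernetes/kubelet-${node}.kubeconfig

kubectl config use-context default \
        --kubeconfig=/etc/kubernetes/kubelet-${node}.kubeconfig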

cat > "/etc/systemd/system/kubelet.service" <<-EOF
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/kubernetes/kubernetes
After=docker.service
Requires=docker.service

[Service]
User=root
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
            --address=172.16.9.201 \
            --allow-privileged=true \
            --alsologtostderr=true \
            --client-ca-file=/etc/kubernetes/ssl/ca.pem \
            --cluster-dns=10.10.0.2 \
            --cluster-domain=k8s.local \
            --docker-tls \
            --docker-tls-ca=/etc/kubernetes/ssl/ca.pem \
            --docker-tls-cert=/etc/kubernetes/ssl/docker.pem \
            --docker-tls-key=/etc/kubernetes/ssl/docker-key.pem \
            --fail-swap-on=true \
            --healthz-port=10248 \
            --hostname-override= \
            --image-pull-progress-deadline=30m \
            --kubeconfig=/etc/kubernetes/kubelet-k8s-node1.kubeconfig \
            --log-dir=/var/log/kubernetes \
            --log-flush-frequency=5s \
            --logtostderr=false \
            --pod-infra-container-image=172.16.9.201:30050/kube-system/pause-amd64:3.1 \
            --port=10250 \
            --read-only-port=10255 \
            --register-node=true \
            --root-dir=/var/lib/kubelet \
            --runtime-request-timeout=10m \
            --serialize-image-pulls=false \
            --tls-cert-file=/etc/kubernetes/ssl/kubelet-k8s-node1.pem \
            --tls-private-key-file=/etc/kubernetes/ssl/kubelet-k8s-node1-key.pem \
            --v=4
Restart=on-failure
RestartSec=5
Type=simple
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

Start the services:
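The kubelet unit sets WorkingDirectory=/var/lib/kubelet and both units log to /var/log/kubernetes, so make sure those directories exist on every node first:

mkdir -p /var/lib/kubelet /var/log/kubernetes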

for svc in {kube-proxy,kubelet}.service; do
    systemctl enable ${svc}
    systemctl start ${svc}
done

[root@k8s-node1 ~]# kubectl get node
NAME        STATUS   ROLES         AGE     VERSION
k8s-node1   Ready    master,node   3m12s   v1.13.4
k8s-node2   Ready    node          3m11s   v1.13.4
k8s-node3   Ready    node          3m10s   v1.13.4

Install CoreDNS

CoreDNS is deployed as containers, so the images need to be pulled (and pushed to the local registry) first:

docker pull coredns/coredns:1.4.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1
# retag the pause image for our Harbor registry; this address is what the
# kubelet's --pod-infra-container-image flag points to
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1 172.16.9.201:30050/kube-system/pause-amd64:3.1
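The retagged pause image still has to be pushed so that every node's kubelet can pull it; a hedged step that assumes the kube-system project already exists in Harbor and that docker is logged in to the registry:

docker push 172.16.9.201:30050/kube-system/pause-amd64:3.1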

Prepare the manifest (coredns.yaml):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes k8s.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: coredns
    version: "1.4.0"
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: coredns
      version: "1.4.0"
  template:
    metadata:
      labels:
        k8s-app: coredns
        version: "1.4.0"
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
      - name: config-volume
        configMap:
          name: coredns
          items:
          - key: Corefile
            path: Corefile

---
apiVersion: v1
kind: Service
metadata:
  name: coredns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: coredns
    version: "1.4.0"
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: coredns
    version: "1.4.0"
  clusterIP: 10.10.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP

Deploy CoreDNS:

kubectl create -f coredns.yaml

[root@k8s-node1 ~]# kubectl --namespace=kube-system get pod
NAME                      READY   STATUS    RESTARTS   AGE
coredns-697bc57fb-49nmn   1/1     Running   0          28m
coredns-697bc57fb-hzlj8   1/1     Running   0          28m
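A quick, hedged check that cluster DNS resolves service names, using a throwaway busybox pod (busybox:1.28 is an assumption; any image with nslookup will do):

kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default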

Deploy the dashboard
