1.准备环境
a.关闭防火墙
systemctl stop firewalld
systemctl disable firewalld

#检查
systemctl status firewalld

b.关闭selinux
vi /etc/selinux/config

#检查
getenforce

c.关闭swap
vi /etc/fstab

#检查
free

d.配置主机与IP对应关系
vi /etc/hosts

e.将桥接IPv4流量传递到iptables
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl --system

#检查
iptables -vnL

2.kubeadm模式安装k8s
a.所有节点安装docker/kubeadm/kubelet

#添加阿里云docker源
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
#列出所有docker可安装版本
yum list docker-ce --showduplicates | sort -r
#安装docker
yum install docker-ce-18.06.1.ce-3.el7
systemctl enable docker
systemctl start docker

#添加阿里云YUM软件源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

#安装kubelet、kubeadm和kubectl
yum remove -y kubelet-1.13.1 kubeadm-1.13.1 kubectl-1.13.1
yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3
systemctl enable kubelet

#部署master节点
kubeadm init --kubernetes-version=1.13.3 \
--apiserver-advertise-address=192.168.43.205 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.254.0.0/16

#部署网络插件
wget https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml

#部署2个node节点
kubeadm join 192.168.43.205:6443 --token 3n01sl.qjsscml383uzldzn --discovery-token-ca-cert-hash sha256:aa728efa0d4cacf2eb064f05ea3535152a0432a5a18c69cdf312bd646830f1f1

#检查环境
kubectl get node
kubectl get cs

kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort

#部署dashboard
kubectl apply -f kubernetes-dashboard.yaml

#创建dashboard登录用户
kubectl create serviceaccount dashboard-admin -n kube-system
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin

kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')


3.k8s二进制模式安装
a.环境规划
master节点:192.168.43.205 安装(kube-apiserver,kube-controller-manager,kube-scheduler,etcd)
node节点:192.168.43.206 安装(kubelet,kube-proxy,docker,etcd)
node节点:192.168.43.207 安装(kubelet,kube-proxy,docker,etcd)

kubernetes版本:1.9
docker版本:17.12-ce
etcd版本:3.0

操作系统:centos7.4_x64  cpu 2核+2G内存+30G磁盘

b.安装docker
yum remove docker \
    docker-client \
    docker-client-latest \
    docker-common \
    docker-latest \
    docker-latest-logrotate \
    docker-logrotate \
    docker-engine
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum list docker-ce --showduplicates | sort -r

yum install -y docker-ce-17.12.0.ce-1.el7.centos

cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors":["http://registry.docker-cn.com"],
  "insecure-registries":["192.168.0.210:5000"]
}
EOF

#docker添加阿里库
https://cr.console.aliyun.com/cn-hangzhou/instances/mirrors

cat << EOF > /etc/docker/daemon.json
{
  "registry-mirrors":["https://live9m0e.mirror.aliyuncs.com"]
}
EOF

systemctl daemon-reload
systemctl restart docker

#启动docker容器
systemctl start docker
systemctl enable docker

c.自签TLS证书
#证书分布情况
etcd组件:ca.pem,server.pem,server-key.pem
kube-apiserver组件:ca.pem,server.pem,server-key.pem
kubelet组件:ca.pem,ca-key.pem
kube-proxy组件:ca.pem,kube-proxy.pem,kube-proxy-key.pem
kubectl组件:ca.pem,admin.pem,admin-key.pem

#证书安装
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo

mkdir /root/ssl
cd /root/ssl

cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

cat > ca-csr.json << EOF
{
    "CN": "kubernetes",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
            "OU": "System"
        }
    ]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

cat > server-csr.json << EOF
{
    "CN": "kubernetes",
    "hosts": [
      "127.0.0.1",
      "192.168.43.205",
      "192.168.43.206",
      "192.168.43.207",
      "kubernetes.default",
      "kubernetes.default.svc",
      "kubernetes.default.svc.cluster",
      "kubernetes.default.svc.cluster.local"
    ],
    "key": {
      "algo": "rsa",
      "size": 2048
    },
    "names": [{
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

cat > admin-csr.json << EOF
{
    "CN": "admin",
    "hosts": [],
    "key": {
      "algo": "rsa",
      "size": 2048
    },
    "names": [{
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin

cat > kube-proxy-csr.json << EOF
{
    "CN": "system:kube-proxy",
    "hosts": [],
    "key": {
      "algo": "rsa",
      "size": 2048
    },
    "names": [{
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

d.部署ETCD集群
#创建初始目录
mkdir /opt/kubernetes/
mkdir /opt/kubernetes/{bin,cfg,ssl}

#下载etcd二进制包并解压
tar xzvf etcd-v3.2.12-linux-amd64.tar.gz
mv etcd-v3.2.12-linux-amd64/etcd /opt/kubernetes/bin/
mv etcd-v3.2.12-linux-amd64/etcdctl /opt/kubernetes/bin/

#创建etcd.service
cat > /usr/lib/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
ExecStart=/opt/kubernetes/bin/etcd \
--name=etcd01 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=https://192.168.43.205:2380 \
--listen-client-urls=https://192.168.43.205:2379,http://127.0.0.1:2379 \
--advertise-client-urls=https://192.168.43.205:2379 \
--initial-advertise-peer-urls=https://192.168.43.205:2380 \
--initial-cluster-token=etcd-cluster \
--initial-cluster=etcd01=https://192.168.43.205:2380,etcd02=https://192.168.43.206:2380,etcd03=https://192.168.43.207:2380 \
--initial-cluster-state=new \
--cert-file=/opt/kubernetes/ssl/server.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
--peer-cert-file=/opt/kubernetes/ssl/server.pem \
--peer-key-file=/opt/kubernetes/ssl/server-key.pem \
--trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

EOF

#启动etcd服务
systemctl start etcd.service
systemctl enable etcd.service


#检查etcd集群情况
/opt/kubernetes/bin/etcdctl  \
--endpoints=https://192.168.43.205:2379 \
--cert-file=/opt/kubernetes/ssl/server.pem \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
cluster-health

/opt/kubernetes/bin/etcdctl \
--endpoints=https://192.168.43.205:2379 \
--cert-file=/opt/kubernetes/ssl/server.pem \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
member list


e.部署flannel网络
#下载二进制安装包(节点安装)
mkdir -p flannel
tar -xzvf flannel-v0.7.1-linux-amd64.tar.gz -C flannel
cp flannel/{flanneld,mk-docker-opts.sh} /opt/kubernetes/bin

#写入分配的子网段到etcd
/opt/kubernetes/bin/etcdctl \
--endpoints="https://192.168.43.205:2379,https://192.168.43.206:2379,https://192.168.43.207:2379" \
--ca-file=/opt/kubernetes/ssl/ca.pem \
--cert-file=/opt/kubernetes/ssl/server.pem \
--key-file=/opt/kubernetes/ssl/server-key.pem \
set /coreos.com/network/config '{"Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
  
#flannel启动文件
cat > /usr/lib/systemd/system/flanneld.service << 'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service


[Service]
Type=notify
ExecStart=/opt/kubernetes/bin/flanneld \
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/server.pem \
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem \
--etcd-endpoints=https://192.168.43.205:2379,https://192.168.43.206:2379,https://192.168.43.207:2379 \
--etcd-prefix=/coreos.com/network
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure


[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

systemctl daemon-reload
systemctl start flanneld.service
systemctl enable flanneld.service

#修改docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

systemctl daemon-reload 
systemctl restart docker.service 

f.创建node节点kubeconfig文件

#创建TLS自动获取证书凭证
export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF

#创建节点bootstrap.kubeconfig
/opt/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.43.205:6443 \
--kubeconfig=bootstrap.kubeconfig

#设置客户端认证参数
/opt/kubernetes/bin/kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig

#设置上下文参数
/opt/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig

#设置默认上下文
/opt/kubernetes/bin/kubectl config use-context default --kubeconfig=bootstrap.kubeconfig


#创建节点kube-proxy kubeconfig文件
/opt/kubernetes/bin/kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/ssl/ca.pem \
--embed-certs=true \
--server=https://192.168.43.205:6443 \
--kubeconfig=kube-proxy.kubeconfig

/opt/kubernetes/bin/kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig

/opt/kubernetes/bin/kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig

/opt/kubernetes/bin/kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig


g.获取K8S二进制包
从git上下载二进制安装包,并解压


h.运行master组件
#安装kube-apiserver
cat > /usr/lib/systemd/system/kube-apiserver.service << 'EOF'
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \
--logtostderr=true \
--v=4 \
--etcd-servers=https://192.168.43.205:2379,https://192.168.43.206:2379,https://192.168.43.207:2379 \
--insecure-bind-address=0.0.0.0 \
--bind-address=192.168.43.205 \
--insecure-port=8080 \
--secure-port=6443 \
--advertise-address=192.168.43.205 \
--allow-privileged=true \
--service-cluster-ip-range=10.10.10.0/24 \
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--kubelet-https=true \
--enable-bootstrap-token-auth \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/server.pem \
--etcd-keyfile=/opt/kubernetes/ssl/server-key.pem
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF


systemctl start kube-apiserver
systemctl enable kube-apiserver

#安装kube-controller-manager
cat > /usr/lib/systemd/system/kube-controller-manager.service << 'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect=true \
--address=127.0.0.1 \
--service-cluster-ip-range=10.10.10.0/24 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl start kube-controller-manager.service
systemctl enable kube-controller-manager.service

#安装kube-scheduler 
cat > /usr/lib/systemd/system/kube-scheduler.service << 'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \
--logtostderr=true \
--v=4 \
--master=127.0.0.1:8080 \
--leader-elect
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl start kube-scheduler.service
systemctl enable kube-scheduler.service

#检查master是否安装成功
kubectl get cs

i.运行node组件
#安装kubelet
cat > /usr/lib/systemd/system/kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \
--logtostderr=true \
--v=4 \
--address=192.168.43.206 \
--hostname-override=192.168.43.206 \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--cert-dir=/opt/kubernetes/ssl \
--allow-privileged=true \
--cluster-dns=10.10.10.2 \
--cluster-domain=cluster.local \
--fail-swap-on=false \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

#给用户kubelet-bootstrap授权获取证书
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

#获取证书请求情况
kubectl get csr
#同意获取证书
kubectl certificate approve node-csr-1WbGGKYvoiLOLYQXZDeXWaFUAOy-wCF5kXrN0l9Pd5c
kubectl certificate approve node-csr-uC1bOZEvKhWJ4Zgsy2UjS_6ATRR4oPgy6xfX73Ulj_Q


systemctl start kubelet.service
systemctl enable kubelet.service

#安装kube-proxy
cat > /usr/lib/systemd/system/kube-proxy.service << 'EOF'
[Unit]
Description=Kubernetes Proxy
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \
--logtostderr=true \
--v=4 \
--hostname-override=192.168.43.206 \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig 
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

systemctl start kube-proxy.service
systemctl enable kube-proxy.service

j.查询集群状态
kubectl get cs
kubectl get nodes

k.启动测试示例
kubectl run nginx --image=nginx --replicas=3
kubectl get pod
kubectl expose deployment nginx --port=8080 --target-port=80 --type=NodePort
kubectl get svc nginx

#注意iptable版本会影响kube-proxy组件部署
https://archive.kernel.org/centos-vault/7.4.1708/isos/x86_64/

rpm -qa | grep iptables
rpm -ivh iptables-1.4.21-24.el7.x86_64.rpm

yum remove iptables
yum install -y iptables-1.4.21-24.el7.x86_64

l.部署Dashboard
#创建用户和角色dashboard-rbac.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
  name: kubernetes-dashboard
  namespace: kube-system
---

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

#部署软件dashboard-deployment.yaml

apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kubernetes-dashboard
      containers:
      - name: kubernetes-dashboard
        image: registry.cn-hangzhou.aliyuncs.com/kube_containers/kubernetes-dashboard-amd64:v1.7.1
        resources:
          limits:
            cpu: 100m
            memory: 300Mi
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 9090
          protocol: TCP
        args: 
        - --apiserver-host=http://192.168.43.205:8080
        livenessProbe:
          httpGet:
            scheme: HTTP
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"

#部署对外服务dashboard-service.yaml

apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  type: NodePort
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐