1. Background

Kubernetes (k8s) provides cluster-level management of Docker containers, handling resource scheduling, automated lifecycle management, load balancing, and high availability.

2. Preparation

Machines:

master machine: 10.0.0.11

node machine: 10.0.0.12

1. Binary installation

Prepare the binary release files:

etcd:https://github.com/coreos/etcd/releases

kubernetes:https://kubernetes.io/docs/setup/release/notes/

 

The downloaded archives are:

kubernetes-server-linux-amd64.tar.gz

kubernetes-node-linux-amd64.tar.gz

etcd-v3.3.4-linux-amd64.tar.gz

Docker should be installed separately.

 

Unpack the archives.

The server archive (together with the etcd archive) provides the binaries: etcd, etcdctl, kube-apiserver, kubectl, kube-controller-manager, kube-proxy, kube-scheduler, kubelet

The node archive provides the binaries: kubectl, kubelet, kube-proxy
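
For reference, a minimal sketch of the unpacking step; the extracted directory names below (etcd-v3.3.4-linux-amd64, kubernetes/server/bin, kubernetes/node/bin) follow the layout of these upstream release archives and may differ for other versions:

[root@k8s-master ~]# tar -zxvf etcd-v3.3.4-linux-amd64.tar.gz

[root@k8s-master ~]# tar -zxvf kubernetes-server-linux-amd64.tar.gz

[root@k8s-master ~]# ls kubernetes/server/bin/

[root@k8s-node ~]# tar -zxvf kubernetes-node-linux-amd64.tar.gz

[root@k8s-node ~]# ls kubernetes/node/bin/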

 

Copy the server binaries to /usr/bin/ on the master:

[root@k8s-master ~]# cp -rf etcd /usr/bin/

[root@k8s-master ~]# cp -rf etcdctl /usr/bin/

[root@k8s-master ~]# cp -rf kube-apiserver /usr/bin/

[root@k8s-master ~]# cp -rf kubectl /usr/bin/

[root@k8s-master ~]# cp -rf kube-controller-manager /usr/bin/

[root@k8s-master ~]# cp -rf kube-proxy /usr/bin/

[root@k8s-master ~]# cp -rf kube-scheduler /usr/bin/

[root@k8s-master ~]# cp -rf kubelet /usr/bin/

 

Copy the node binaries to /usr/bin on the node:

[root@k8s-node ~]# cp -rf kubectl /usr/bin/

[root@k8s-node ~]# cp -rf kubelet /usr/bin/

[root@k8s-node ~]# cp -rf kube-proxy /usr/bin/
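
If the node archive was unpacked on the master rather than on the node itself, the binaries can first be pushed over with scp before the copy above (a sketch assuming root SSH access from the master to the node and the kubernetes/node/bin layout mentioned earlier):

[root@k8s-master ~]# scp kubernetes/node/bin/kubelet kubernetes/node/bin/kube-proxy kubernetes/node/bin/kubectl 10.0.0.12:/root/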

 

2. Online (yum) installation of k8s

On the master node:

[root@k8s-master ~]# yum install etcd -y 

[root@k8s-master ~]# yum install docker -y 

[root@k8s-master ~]# yum install kubernetes -y 

[root@k8s-master ~]# yum install flannel -y

On the node:

[root@k8s-node ~]# yum install docker -y

[root@k8s-node ~]# yum install kubernetes -y 

[root@k8s-node ~]# yum install flannel -y
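
After the installation you can confirm which packages and versions the repository actually provided (an optional quick check, run on each machine):

[root@k8s-master ~]# rpm -qa | grep -E 'kubernetes|etcd|docker|flannel'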

 

3. Ensure the required systemd services exist

If the following unit files are missing, create them (the yum installation normally provides them already):

[root@k8s-master ~]# vim /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

 

[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_ETCD_SERVERS \
            $KUBE_API_ADDRESS \
            $KUBE_API_PORT \
            $KUBELET_PORT \
            $KUBE_ALLOW_PRIV \
            $KUBE_SERVICE_ADDRESSES \
            $KUBE_ADMISSION_CONTROL \
            $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

 

[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=kube
ExecStart=/usr/bin/kube-scheduler \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
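
The master also needs kube-controller-manager, which is enabled in section 4 below. If its unit file is missing as well, a sketch modeled on the scheduler unit above (and on the unit shipped by the CentOS kubernetes-master package) is:

[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=kube
ExecStart=/usr/bin/kube-controller-manager \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target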

[root@k8s-master ~]# vim /usr/lib/systemd/system/kubelet.service (also required on the node)
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBELET_API_SERVER \
            $KUBELET_ADDRESS \
            $KUBELET_PORT \
            $KUBELET_HOSTNAME \
            $KUBE_ALLOW_PRIV \
            $KUBELET_POD_INFRA_CONTAINER \
            $KUBELET_ARGS
Restart=on-failure

[Install]
WantedBy=multi-user.target


 

[root@k8s-master ~]# vim /usr/lib/systemd/system/kube-proxy.service (also required on the node)
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

[root@k8s-master ~]# systemctl daemon-reload
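
After the reload you can confirm that systemd has picked up all the unit files (a quick check; if you created or edited unit files on the node, run systemctl daemon-reload there as well):

[root@k8s-master ~]# systemctl list-unit-files | grep -E 'etcd|kube'

[root@k8s-node ~]# systemctl list-unit-files | grep kube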

4. Edit the configuration files and start each component

1.etcd

Configuration
[root@k8s-master ~]# vim /etc/etcd/etcd.conf
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_NAME="default"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
Start
[root@k8s-master ~]# systemctl enable etcd.service
[root@k8s-master ~]# systemctl  start etcd.service
Verify
[root@k8s-master ~]# etcdctl -C http://10.0.0.11:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://10.0.0.11:2379
cluster is healthy
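
A simple write/read round trip is another quick sanity check (a sketch using arbitrary test keys; this etcdctl defaults to the v2 API, which the commands below rely on):
[root@k8s-master ~]# etcdctl -C http://10.0.0.11:2379 set /test/hello world
[root@k8s-master ~]# etcdctl -C http://10.0.0.11:2379 get /test/hello
[root@k8s-master ~]# etcdctl -C http://10.0.0.11:2379 rm /test/hello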

2.apiserver

Configuration
[root@k8s-master ~]# vim /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBE_ETCD_SERVERS="--etcd-servers=http://10.0.0.11:2379"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS="--etcd-servers=http://10.0.0.11:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --service-cluster-ip-range=170.170.0.0/16 --service-node-port-range=1-65535 --admission-control=NamespaceLifecycle,LimitRanger,ResourceQuota --logtostderr=false --log-dir=/data/log/kubernetes --v=2"

[root@k8s-master ~]# vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://10.0.0.11:8080"
Start
[root@k8s-master ~]# systemctl enable kube-apiserver.service
[root@k8s-master ~]# systemctl start kube-apiserver.service
[root@k8s-master ~]# systemctl enable kube-controller-manager.service
[root@k8s-master ~]# systemctl start kube-controller-manager.service
[root@k8s-master ~]# systemctl enable kube-scheduler.service
[root@k8s-master ~]# systemctl start kube-scheduler.service

Verify the services
[root@k8s-master ~]# systemctl status kube-apiserver.service kube-controller-manager.service  kube-scheduler.service
...running...
Verify the API
[root@k8s-master ~]# curl http://localhost:8080/api/
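
kubectl gives an additional view of control-plane health (a sketch; on this master, kubectl talks to the local insecure port 8080 by default):
[root@k8s-master ~]# kubectl get componentstatuses
[root@k8s-master ~]# kubectl cluster-info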

 

3. Node

[root@k8s-node ~]# vim /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=true"
KUBE_MASTER="--master=http://10.0.0.11:8080"
Configure node-1
[root@k8s-node ~]# vim /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=node-1"
KUBELET_API_SERVER="--api-servers=http://10.0.0.11:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
Start
[root@k8s-node ~]# systemctl enable kubelet.service
[root@k8s-node ~]# systemctl start kubelet.service
[root@k8s-node ~]# systemctl enable kube-proxy.service
[root@k8s-node ~]# systemctl start kube-proxy.service
Verify (on the master)
[root@k8s-master ~]# kubectl get nodes
NAME        STATUS    AGE
10.0.0.12   Ready     3m
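
If the node does not show up or stays NotReady, describing it usually reveals the reason (a sketch; use whatever node name kubectl get nodes reports, 10.0.0.12 above, or node-1 if the hostname-override takes effect):
[root@k8s-master ~]# kubectl describe node 10.0.0.12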

 

4. Configure the flannel network

Configuration
[root@k8s-master ~]# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"

[root@k8s-node ~]# vim /etc/sysconfig/flanneld
FLANNEL_ETCD_ENDPOINTS="http://10.0.0.11:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
Set the overlay network range in etcd
[root@k8s-master ~]# etcdctl mk /atomic.io/network/config '{ "Network": "172.16.0.0/16" }'
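
You can confirm that the key flannel will read is in place (a quick check):
[root@k8s-master ~]# etcdctl get /atomic.io/network/config
{ "Network": "172.16.0.0/16" }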

Start
[root@k8s-master ~]# systemctl enable flanneld.service 
[root@k8s-master ~]# systemctl start flanneld.service 
[root@k8s-master ~]# service docker restart
[root@k8s-master ~]# systemctl restart kube-apiserver.service
[root@k8s-master ~]# systemctl restart kube-controller-manager.service
[root@k8s-master ~]# systemctl restart kube-scheduler.service

[root@k8s-node ~]# systemctl enable flanneld.service 
[root@k8s-node ~]# systemctl start flanneld.service 
[root@k8s-node ~]# service docker restart
[root@k8s-node ~]# systemctl restart kubelet.service
[root@k8s-node ~]# systemctl restart kube-proxy.service
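
Once flanneld and Docker have been restarted, each host should hold a subnet lease inside 172.16.0.0/16: flanneld records it in /run/flannel/subnet.env (its default subnet file), and docker0 should sit inside that subnet. A sketch of the checks (the flannel0 interface exists for the default UDP backend; other backends use different interface names):
[root@k8s-node ~]# cat /run/flannel/subnet.env
[root@k8s-node ~]# ip addr show flannel0
[root@k8s-node ~]# ip addr show docker0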

 

5. End-to-end verification

[root@k8s-master ~]# vim nginx-rc.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx
spec:
  replicas: 1
  selector:
    app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        ports:
        - containerPort: 80
[root@k8s-master ~]# kubectl create -f nginx-rc.yaml

replicationcontroller "nginx" created

Verify that it is running:

[root@k8s-master ~]# kubectl get pods -o wide
NAME      READY     STATUS    RESTARTS   AGE       IP            NODE
nginx     1/1       Running   0          2h        172.16.42.2   10.0.0.12
[root@k8s-node ~]# docker ps -a

Here you can see whether the container exists on the node and check its running state.
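
To reach nginx from outside the cluster, you can additionally expose the ReplicationController through a NodePort Service (a sketch; the Service name and nodePort 30080 are arbitrary choices, allowed here because the apiserver was started with --service-node-port-range=1-65535):

[root@k8s-master ~]# vim nginx-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30080
[root@k8s-master ~]# kubectl create -f nginx-svc.yaml
[root@k8s-master ~]# curl http://10.0.0.12:30080/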

At this point the deployment is complete.

 

6. Common deployment problems

After creating cluster resource objects, a pod may stay stuck in the ContainerCreating state, with an event like:

failed to "StartContainer" for "POD" with ErrImagePull

1. Check the pod status; it stays in ContainerCreating:

[root@k8s-master ~]# kubectl get pods

NAME        READY     STATUS              RESTARTS   AGE

nginx-pod   0/1       ContainerCreating   0          15h

2. Check the error details:

[root@k8s-master ~]# kubectl describe pod nginx-pod

...(open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)...

This shows that the image cannot be pulled because the Red Hat CA certificate is missing, so the certificate needs to be installed (it must be present on every host where Docker pulls images from registry.access.redhat.com, in practice each node).

Certificate installation, method 1:

[root@k8s-master ~]# yum install python-rhsm* -y

Certificate installation, method 2:

[root@k8s-master ~]# wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm

[root@k8s-master ~]# rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem
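
After installing the certificate, confirm that the file Docker complained about now resolves; kubelet keeps retrying the image pull on its own, so the pod should leave ContainerCreating shortly afterwards (a quick check):

[root@k8s-node ~]# ls -l /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt

[root@k8s-master ~]# kubectl get pods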
