1. Install the master node

  1. Modify the master node hostname

vi /etc/hostname

master001

  2. Modify the master node hosts file

vi /etc/hosts

172.17.93.186   master001       master001

 

  3. Modify slave node 1 hostname

vi /etc/hostname

slave001

  4. Modify slave node 1 hosts file

vi /etc/hosts

172.17.93.187   slave001        slave001

  5. Modify slave node 2 hostname

vi /etc/hostname

slave002

  6. Modify slave node 2 hosts file

vi /etc/hosts

172.17.93.188   slave002        slave002
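
If the nodes also need to resolve one another by name, the same three entries can be kept in /etc/hosts on every machine; a minimal sketch, assuming the example IPs above:

172.17.93.186   master001
172.17.93.187   slave001
172.17.93.188   slave002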

  7. Tune the default TCP network parameters

vi /etc/sysctl.conf

net.ipv4.tcp_tw_reuse = 1

net.ipv4.tcp_tw_recycle = 1

net.ipv4.tcp_fin_timeout = 5

net.ipv4.ip_local_port_range = 1024 65535

net.core.wmem_max = 4194304

net.core.rmem_max = 4194304

 

fs.file-max=6291454

fs.nr_open=6291454
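
These kernel parameters only take effect after a reboot or an explicit reload; to apply them immediately, the standard sysctl reload can be used (not specific to this setup):

sysctl -p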

 

  8. Apply the same TCP settings on the remaining slave nodes
  9. Reboot all machines (reboot)
  10. Transfer the installation packages to the root home directory

docker-1.12.6.tgz

etcd-v3.1.10-linux-amd64.tar.gz

flannel-v0.9.0-linux-amd64.tar.gz

hyperkube-amd64.v1.9.3.tar.zip

 

  11. Extract the installation packages

tar -zxvf etcd-v3.1.10-linux-amd64.tar.gz

tar -zxvf flannel-v0.9.0-linux-amd64.tar.gz

tar -zxvf docker-1.12.6.tgz

 

apt update

apt install unzip

 

unzip hyperkube-amd64.v1.9.3.tar.zip

 

  12. Make kubectl and kubelet executable

chmod +x kubectl

chmod +x kubelet

 

  13. Install etcd (cd into the extracted etcd directory)

cd etcd-v3.1.10-linux-amd64/

 

// Copy the two binaries to the bin directory

cp etcd etcdctl /usr/local/bin

 

// Verify the installation

etcd --help

 

// Create the etcd systemd service

vi /lib/systemd/system/etcd3.service

 

[Unit]

Description=etcd3

Conflicts=etcd.service etcd2.service

 

[Service]

User=root

Type=notify

WorkingDirectory=/var/lib/etcd3/

ExecStart=/usr/local/bin/etcd --config-file /etc/etcd/etcd3.conf

Restart=always

RestartSec=10s

LimitNOFILE=40000

TimeoutStartSec=0

 

[Install]

WantedBy=multi-user.target

 

// Create the configuration directory
// Add the configuration file

mkdir -p /etc/etcd

 

vi /etc/etcd/etcd3.conf

 

name: "default"

data-dir: "/var/lib/etcd3/default.etcd"

listen-client-urls: "http://172.17.93.186:2379"

advertise-client-urls: "http://172.17.93.186:2379"

listen-peer-urls: "http://172.17.93.186:2380"

initial-advertise-peer-urls: "http://172.17.93.186:2380"

initial-cluster: default=http://172.17.93.186:2380

initial-cluster-token: coreos-token01

 

 

// Create the working directory and enable the service

mkdir -p /var/lib/etcd3/

 

systemctl enable etcd3

 

systemctl start etcd3

 

 

// Verification

 

View the keys:

 

ETCDCTL_API=3 etcdctl --endpoints=http://172.17.93.186:2379 get / --prefix --keys-only

 

ETCDCTL_API=3 etcdctl --endpoints=http://172.17.93.186:2379 get / --prefix

 

List the etcd cluster members:

 

ETCDCTL_API=3  etcdctl --endpoints=http://172.17.93.186:2379 member list
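
As an extra health check, the endpoint can also be probed directly with the v3 etcdctl; a minimal sketch:

ETCDCTL_API=3 etcdctl --endpoints=http://172.17.93.186:2379 endpoint health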

 

  14. Add the flannel network configuration to etcd

curl -X PUT -d "value={\"Network\":\"172.20.0.0/16\",\"Backend\":{\"Type\":\"vxlan\"}}" "172.17.93.186:2379/v2/keys/coreos.com/network/config"
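
To confirm the key was written, it can be read back through the same etcd v2 keys API; a quick check:

curl "172.17.93.186:2379/v2/keys/coreos.com/network/config"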

 

  15. Install flanneld on the master

cp ./flanneld /usr/local/bin

cp ./mk-docker-opts.sh /usr/local/bin

  16. flanneld configuration files

// Master node

 

vi /lib/systemd/system/flanneld.service

 

[Unit]

Description=Network fabric for containers

Documentation=https://github.com/coreos/flannel

After=etcd.service etcd2.service etcd3.service

Before=docker.service

 

[Service]

Type=notify

Restart=always

RestartSec=5

EnvironmentFile=/etc/flanneld/flanneld.conf

ExecStartPre=/sbin/modprobe ip_tables

ExecStart=/usr/local/bin/flanneld $FLANNEL_OPTS

ExecStartPost=/usr/local/bin/mk-docker-opts.sh -d /run/flannel_docker_opts.env -i

 

[Install]

WantedBy=multi-user.target

 

// Create the configuration file

mkdir -p /etc/flanneld

 

vi /etc/flanneld/flanneld.conf

 

// Cluster mode

FLANNEL_OPTS="-etcd-endpoints=http://10.0.4.201:2379,http://10.0.4.202:2379,http://10.0.4.203:2379  -etcd-prefix=/coreos.com/network -iface=10.0.4.201 --ip-masq=true"

// Standalone mode

FLANNEL_OPTS="-etcd-endpoints=http://172.17.93.186:2379  -etcd-prefix=/coreos.com/network -iface=172.17.93.186 -ip-masq=true"

 

// Enable and start the service

systemctl enable flanneld

 

systemctl start flanneld
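
Once flanneld is running, it should have leased a /24 from 172.20.0.0/16 and written the Docker options file referenced by the unit above; a quick check (the flannel.1 interface is created by the vxlan backend):

cat /run/flannel_docker_opts.env
ip addr show flannel.1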

 

  17. Install Docker on the master

cd

cp /root/docker/* /usr/local/bin

 

// Edit the docker.service unit file

vi /lib/systemd/system/docker.service

// Unit file contents:

 

[Unit]

Description=Docker Application Container Engine

Documentation=https://docs.docker.com

After=flanneld.service docker.socket

Requires=flanneld.service docker.socket

 

[Service]

Type=notify

# the default is not to use systemd for cgroups because the delegate issues still

# exists and systemd currently does not support the cgroup feature set required

# for containers run by docker

EnvironmentFile=-/etc/default/docker

EnvironmentFile=-/run/flannel_docker_opts.env

ExecStart=/usr/local/bin/dockerd --storage-driver=overlay -H fd:// $DOCKER_OPTS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ

ExecReload=/bin/kill -s HUP $MAINPID

# Having non-zero Limit*s causes performance problems due to accounting overhead

# in the kernel. We recommend using cgroups to do container-local accounting.

LimitNOFILE=infinity

LimitNPROC=infinity

LimitCORE=infinity

# Uncomment TasksMax if your systemd version supports it.

# Only systemd 226 and above support this version.

TasksMax=infinity

TimeoutStartSec=0

# set delegate yes so that systemd does not reset the cgroups of docker containers

Delegate=yes

# kill only the docker process, not all processes in the cgroup

KillMode=process

 

[Install]

WantedBy=multi-user.target

 

 

#--storage-driver=overlay

#--storage-driver=devicemapper

 

// Edit the docker.socket unit file

vi /lib/systemd/system/docker.socket

 

[Unit]

Description=Docker Socket for the API

PartOf=docker.service

 

[Socket]

ListenStream=/var/run/docker.sock

SocketMode=0660

SocketUser=root

SocketGroup=root

 

[Install]

WantedBy=sockets.target

 

// Docker daemon options

vi /etc/default/docker

 

DOCKER_OPTS="--selinux-enabled --insecure-registry local-registry.com"

 

  18. Modify the hosts file to add an entry for the local Docker registry

172.17.93.186   local-registry.com

  19. Enable and start the Docker service

systemctl enable docker

 

systemctl start docker

  20. Install the private Docker registry (HTTP)

// Mount point for the private registry data

mkdir -p /opt/registry

 

// Skip if Docker is already running

systemctl enable docker

systemctl start docker

 

// Start the private registry

docker run -d -v /opt/registry:/var/lib/registry -p 80:5000 --restart=always --name registry registry:2.6.2

Check that the registry container is running:

root@master001:~/docker# docker ps -a

CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                  NAMES

203e9ffcb605        registry:2.6.2      "/entrypoint.sh /etc/"   6 seconds ago       Up 5 seconds        0.0.0.0:80->5000/tcp   registry

  21. Test pushing to the private registry

docker load < hyperkube-amd64.v1.9.3.tar

 

// Re-tag the image for the private registry

docker tag k8s.gcr.io/hyperkube-amd64:v1.9.3 local-registry.com/hyperkube-amd64:v1.9.3

// Test the push

docker push local-registry.com/hyperkube-amd64:v1.9.3
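
The registry's standard v2 HTTP API can be used to verify that the image actually landed in the private registry (it listens on port 80 as mapped above):

curl http://local-registry.com/v2/_catalog
curl http://local-registry.com/v2/hyperkube-amd64/tags/list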

  22. Generate certificates

// In the root home directory

mkdir cakey

 

cd cakey

// Generate the CA key and certificate

openssl genrsa -out ca-key.pem 2048

 

openssl req -x509 -new -nodes -key ca-key.pem -days 10000 -out ca.pem -subj "/CN=kube-ca"

 

 

// Master (API server) certificate

vi openssl.cnf

 

[req]

req_extensions = v3_req

distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]

basicConstraints = CA:FALSE

keyUsage = nonRepudiation, digitalSignature, keyEncipherment

subjectAltName = @alt_names

[alt_names]

DNS.1 = kubernetes

DNS.2 = kubernetes.default

DNS.3 = kubernetes.default.svc

DNS.4 = kubernetes.default.svc.cluster.local

IP.1 = 192.168.0.1

IP.2 = 172.17.93.186

 

 

// Generate the api-server key and certificate

 

openssl genrsa -out apiserver-key.pem 2048

 

openssl req -new -key apiserver-key.pem -out apiserver.csr -subj "/CN=kube-apiserver" -config openssl.cnf

 

openssl x509 -req -in apiserver.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out apiserver.pem -days 10000 -extensions v3_req -extfile openssl.cnf

 

// Create worker-openssl.cnf

 

vi worker-openssl.cnf

 

[req]

req_extensions = v3_req

distinguished_name = req_distinguished_name

[req_distinguished_name]

[ v3_req ]

basicConstraints = CA:FALSE

keyUsage = nonRepudiation, digitalSignature, keyEncipherment

subjectAltName = @alt_names

[alt_names]

IP.1 = $ENV::WORKER_IP

 

// Generate the worker (slave node) certificates

slave01

 

openssl genrsa -out 172.17.93.187-worker-key.pem 2048

 

WORKER_IP=172.17.93.187 openssl req -new -key 172.17.93.187-worker-key.pem -out 172.17.93.187-worker.csr -subj "/CN=172.17.93.187" -config worker-openssl.cnf

 

WORKER_IP=172.17.93.187 openssl x509 -req -in 172.17.93.187-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out 172.17.93.187-worker.pem -days 10000 -extensions v3_req -extfile worker-openssl.cnf

 

slave02

 

openssl genrsa -out 172.17.93.188-worker-key.pem 2048

 

WORKER_IP=172.17.93.188 openssl req -new -key 172.17.93.188-worker-key.pem -out 172.17.93.188-worker.csr -subj "/CN=172.17.93.188" -config worker-openssl.cnf

 

WORKER_IP=172.17.93.188 openssl x509 -req -in 172.17.93.188-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out 172.17.93.188-worker.pem -days 10000 -extensions v3_req -extfile worker-openssl.cnf

 

slave03

 

openssl genrsa -out slave03-worker-key.pem 2048

 

WORKER_IP=10.1.12.83 openssl req -new -key slave03-worker-key.pem -out slave03-worker.csr -subj "/CN=slave03" -config worker-openssl.cnf

 

WORKER_IP=10.1.12.83 openssl x509 -req -in slave03-worker.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out slave03-worker.pem -days 10000 -extensions v3_req -extfile worker-openssl.cnf
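
Before distributing the certificates, the SANs and CA signatures can be double-checked with openssl; a minimal sketch:

openssl x509 -in apiserver.pem -noout -text | grep -A1 "Subject Alternative Name"
openssl verify -CAfile ca.pem apiserver.pem 172.17.93.187-worker.pem 172.17.93.188-worker.pem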

  23. Install kubelet

// Copy kubectl and kubelet to the bin directory

cp kubectl kubelet /usr/local/bin/

 

// Added later, still needs verification: the pause image must end up in the private registry as local-registry.com/pause-amd64:3.0, since the kubelet flags below reference that name

docker load < pause-amd64:3.0

docker push pause-amd64:3.0

 

// Check the version

kubelet --version

 

// Set up the configuration and certificates

mkdir -p /etc/kubernetes/ssl

 

cp /root/cakey/ca.pem /etc/kubernetes/ssl/ca.pem

 

cp /root/cakey/apiserver.pem /etc/kubernetes/ssl/apiserver.pem

 

cp /root/cakey/apiserver-key.pem /etc/kubernetes/ssl/apiserver-key.pem

 

chmod 600 /etc/kubernetes/ssl/*-key.pem

 

chown root:root /etc/kubernetes/ssl/*-key.pem

 

cd /etc/kubernetes/ssl/

 

ln -s apiserver.pem worker.pem

 

ln -s apiserver-key.pem worker-key.pem

-- the symlinks hide the name difference between master and worker certificates

 

// Create the directory for static pod manifests

 

mkdir -p /etc/kubernetes/manifests

 

vi /etc/kubernetes/worker-kubeconfig.yaml

 

apiVersion: v1

kind: Config

clusters:

- name: master01

  cluster:

    certificate-authority: /etc/kubernetes/ssl/ca.pem

    api-version: v1

    server: https://172.17.93.186:6443

users:

- name: kubelet

  user:

    client-certificate: /etc/kubernetes/ssl/worker.pem

    client-key: /etc/kubernetes/ssl/worker-key.pem

contexts:

- context:

    cluster: master01

    user: kubelet

  name: kubelet-context

current-context: kubelet-context

 

// Configure the kubelet systemd unit

vi /lib/systemd/system/kubelet.service

 

[Unit]

Description=Kubernetes Kubelet Server

Documentation=https://github.com/GoogleCloudPlatform/kubernetes

After=docker.service

Requires=docker.service

 

[Service]

WorkingDirectory=/var/lib/kubelet

EnvironmentFile=-/etc/kubernetes/kubelet

ExecStart=/usr/local/bin/kubelet \

  --register-schedulable=false \

  --allow-privileged=true \

  --pod-manifest-path=/etc/kubernetes/manifests \

  --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml \

  --tls-cert-file=/etc/kubernetes/ssl/worker.pem \

  --tls-private-key-file=/etc/kubernetes/ssl/worker-key.pem \

  --hostname-override=172.17.93.186 \

  --cluster-dns=192.168.200.1 \

  --cluster-domain=cluster.local \

  --pod-infra-container-image='local-registry.com/pause-amd64:3.0' \

  --v=2 \

  --cadvisor-port=4194 \

  --fail-swap-on=false \

  $KUBELET_ARGS

  

Restart=on-failure

RestartSec=5

 

[Install]

WantedBy=multi-user.target

 

// Create the working directory

mkdir -p /var/lib/kubelet    --- important

 

 

// Enable and run the kubelet service

 

systemctl enable kubelet

 

systemctl start kubelet
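
If the kubelet fails to start, its status and logs can be checked through systemd:

systemctl status kubelet
journalctl -u kubelet -f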

  24. Create kube-apiserver.yaml

vi /etc/kubernetes/manifests/kube-apiserver.yaml

 

apiVersion: v1

kind: Pod

metadata:

  name: kube-apiserver

  namespace: kube-system

spec:

  hostNetwork: true

  containers:

  - name: kube-apiserver

    image: local-registry.com/hyperkube-amd64:v1.9.3

    command:

    - /hyperkube

    - apiserver

    - --insecure-bind-address=127.0.0.1

    - --bind-address=172.17.93.186

    - --insecure-port=8080

    - --etcd-servers=http://172.17.93.186:2379

    - --allow-privileged=true

    - --service-cluster-ip-range=192.168.0.0/16

    - --secure-port=6443

    - --advertise-address=172.17.93.186

    - --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota

    - --tls-cert-file=/etc/kubernetes/ssl/apiserver.pem

    - --tls-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem

    - --client-ca-file=/etc/kubernetes/ssl/ca.pem

    - --service-account-key-file=/etc/kubernetes/ssl/apiserver-key.pem

    - --runtime-config=extensions/v1beta1/networkpolicies=true

    - --anonymous-auth=false

    livenessProbe:

      httpGet:

        host: 127.0.0.1

        port: 8080

        path: /healthz

      initialDelaySeconds: 15

      timeoutSeconds: 15

    ports:

    - containerPort: 6443

      hostPort: 6443

      name: https

    - containerPort: 8080

      hostPort: 8080

      name: local

    volumeMounts:

    - mountPath: /etc/kubernetes/ssl

      name: ssl-certs-kubernetes

      readOnly: true

    - mountPath: /etc/ssl/certs

      name: ssl-certs-host

      readOnly: true

  volumes:

  - hostPath:

      path: /etc/kubernetes/ssl

    name: ssl-certs-kubernetes

  - hostPath:

      path: /usr/share/ca-certificates

    name: ssl-certs-host

 

 

// If something goes wrong, inspect the journal

journalctl -ex

 

// Check the pods in all namespaces

kubectl get pods --all-namespaces
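
Once the static apiserver pod is up, basic cluster state can also be checked from the master (kubectl defaults to the insecure 127.0.0.1:8080 endpoint configured above):

kubectl get nodes
kubectl cluster-info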

 

 

  25. Create the kube-proxy pod on slave node 001

//slave

 

vi /etc/kubernetes/manifests/kube-proxy.yaml

apiVersion: v1

kind: Pod

metadata:

  name: kube-proxy

  namespace: kube-system

spec:

  hostNetwork: true

  containers:

  - name: kube-proxy

    image: local-registry.com/hyperkube-amd64:v1.9.3

    command:

    - /hyperkube

    - proxy

    - --kubeconfig=/etc/kubernetes/worker-kubeconfig.yaml

    - --proxy-mode=iptables

    securityContext:

      privileged: true

    volumeMounts:

      - mountPath: /etc/ssl/certs

        name: "ssl-certs"

      - mountPath: /etc/kubernetes/worker-kubeconfig.yaml

        name: "kubeconfig"

        readOnly: true

      - mountPath: /etc/kubernetes/ssl

        name: "etc-kube-ssl"

        readOnly: true

  volumes:

    - name: "ssl-certs"

      hostPath:

        path: "/usr/share/ca-certificates"

    - name: "kubeconfig"

      hostPath:

        path: "/etc/kubernetes/worker-kubeconfig.yaml"

    - name: "etc-kube-ssl"

      hostPath:

        path: "/etc/kubernetes/ssl"

 


 

  26. Upload coredns.1.1.0.tar to the master

docker load < coredns.1.1.0.tar

docker push local-registry.com/coredns:1.1.0

docker load < kube-heapster.v1.3.0.tar

 

docker tag gcr.io/google_containers/heapster-amd64:v1.3.0 local-registry.com/heapster-amd64:v1.3.0

docker tag gcr.io/google_containers/addon-resizer:1.7 local-registry.com/addon-resizer:1.7

 

docker push local-registry.com/addon-resizer:1.7

docker push local-registry.com/heapster-amd64:v1.3.0

 

docker load < kube-dns-autoscaler.1.1.1-r3.tar

docker push local-registry.com/cluster-proportional-autoscaler-amd64:1.1.1-r3

  27. Create the DNS working directory

mkdir coredns

 

  28. Install coredns-dep.yaml

vi coredns-dep.yaml

 

apiVersion: v1

kind: ServiceAccount

metadata:

  name: coredns

  namespace: kube-system

  labels:

      kubernetes.io/cluster-service: "true"

      addonmanager.kubernetes.io/mode: Reconcile

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

    addonmanager.kubernetes.io/mode: Reconcile

  name: system:coredns

rules:

- apiGroups:

  - ""

  resources:

  - endpoints

  - services

  - pods

  - namespaces

  verbs:

  - list

  - watch

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  annotations:

    rbac.authorization.kubernetes.io/autoupdate: "true"

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

    addonmanager.kubernetes.io/mode: EnsureExists

  name: system:coredns

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: system:coredns

subjects:

- kind: ServiceAccount

  name: coredns

  namespace: kube-system

---

apiVersion: v1

kind: ConfigMap

metadata:

  name: coredns

  namespace: kube-system

  labels:

      addonmanager.kubernetes.io/mode: EnsureExists

data:

  Corefile: |

    .:53 {

        errors

        log

        health

        kubernetes cluster.local 192.168.0.0/16 {

            pods insecure

        }

        prometheus

        proxy . /etc/resolv.conf

        cache 30

    }

---

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: coredns

  namespace: kube-system

  labels:

    k8s-app: coredns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

#  replicas: 1

  selector:

    matchLabels:

      k8s-app: coredns

  template:

    metadata:

      labels:

        k8s-app: coredns

    spec:

      serviceAccountName: coredns

      tolerations:

        - key: node-role.kubernetes.io/master

          effect: NoSchedule

        - key: "CriticalAddonsOnly"

          operator: "Exists"

      containers:

      - name: coredns

        image: local-registry.com/coredns:1.1.0

        imagePullPolicy: IfNotPresent

        resources:

          limits:

            memory: 170Mi

          requests:

            cpu: 100m

            memory: 70Mi

        args: [ "-conf", "/etc/coredns/Corefile" ]

        volumeMounts:

        - name: config-volume

          mountPath: /etc/coredns

        ports:

        - containerPort: 53

          name: dns

          protocol: UDP

        - containerPort: 53

          name: dns-tcp

          protocol: TCP

        - containerPort: 9153

          name: metrics

          protocol: TCP

        livenessProbe:

          httpGet:

            path: /health

            port: 8080

            scheme: HTTP

          initialDelaySeconds: 60

          timeoutSeconds: 5

          successThreshold: 1

          failureThreshold: 5

      dnsPolicy: Default

      volumes:

        - name: config-volume

          configMap:

            name: coredns

            items:

            - key: Corefile

              path: Corefile

 

 

  29. Install coredns-svc.yaml

vi coredns-svc.yaml

 

apiVersion: v1

kind: Service

metadata:

  name: coredns

  namespace: kube-system

  labels:

    k8s-app: coredns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

  selector:

    k8s-app: coredns

  clusterIP: 192.168.200.1

  ports:

  - name: dns

    port: 53

    protocol: UDP

  - name: dns-tcp

    port: 53

    protocol: TCP

  - name: metrics

    port: 9153

    protocol: TCP

  30. Install dns-horizontal-autoscaler.yaml

vi dns-horizontal-autoscaler.yaml

 

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: kube-dns-autoscaler

  namespace: kube-system

  labels:

    k8s-app: kube-dns-autoscaler

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

spec:

  template:

    metadata:

      labels:

        k8s-app: kube-dns-autoscaler

      annotations:

        scheduler.alpha.kubernetes.io/critical-pod: ''

    spec:

      containers:

      - name: autoscaler

        image: local-registry.com/cluster-proportional-autoscaler-amd64:1.1.1-r3

        resources:

            requests:

                cpu: "20m"

                memory: "10Mi"

        command:

          - /cluster-proportional-autoscaler

          - --namespace=kube-system

          - --configmap=kube-dns-autoscaler

          # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base

          - --target=Deployment/coredns

          # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.

          # If using small nodes, "nodesPerReplica" should dominate.

          - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}

          - --logtostderr=true

          - --v=2

      tolerations:

      - key: "CriticalAddonsOnly"

        operator: "Exists" 

  31. Install heapster-controller.yaml

vi heapster-controller.yaml

 

apiVersion: extensions/v1beta1

kind: Deployment

metadata:

  name: heapster-v1.3.0

  namespace: kube-system

  labels:

    k8s-app: heapster

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    version: v1.3.0

spec:

  replicas: 1

  selector:

    matchLabels:

      k8s-app: heapster

      version: v1.3.0

  template:

    metadata:

      labels:

        k8s-app: heapster

        version: v1.3.0

      annotations:

        scheduler.alpha.kubernetes.io/critical-pod: ''

    spec:

      containers:

        - image: local-registry.com/heapster-amd64:v1.3.0

          name: heapster

          livenessProbe:

            httpGet:

              path: /healthz

              port: 8082

              scheme: HTTP

            initialDelaySeconds: 180

            timeoutSeconds: 5

          command:

            - /heapster

            - --source=kubernetes.summary_api:''

        - image: local-registry.com/addon-resizer:1.7

          name: heapster-nanny

          resources:

            limits:

              cpu: 50m

              memory: 192160Ki

            requests:

              cpu: 50m

              memory: 192160Ki

          env:

            - name: MY_POD_NAME

              valueFrom:

                fieldRef:

                  fieldPath: metadata.name

            - name: MY_POD_NAMESPACE

              valueFrom:

                fieldRef:

                  fieldPath: metadata.namespace

          command:

            - /pod_nanny

            - --cpu=80m

            - --extra-cpu=0.5m

            - --memory=140Mi

            - --extra-memory=4Mi

            - --threshold=5

            - --deployment=heapster-v1.3.0

            - --container=heapster

            - --poll-period=300000

            - --estimator=exponential

      tolerations:

        - key: "CriticalAddonsOnly"

          operator: "Exists"

 

 

  32. Install heapster-service.yaml

vi heapster-service.yaml

 

kind: Service

apiVersion: v1

metadata:

  name: heapster

  namespace: kube-system

  labels:

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "Heapster"

spec:

  ports:

    - port: 80

      targetPort: 8082

  selector:

    k8s-app: heapster

 

  33. Apply the five manifests above

kubectl apply -f coredns-dep.yaml

kubectl apply -f coredns-svc.yaml

kubectl apply -f dns-horizontal-autoscaler.yaml

kubectl apply -f heapster-controller.yaml

kubectl apply -f heapster-service.yaml
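
After applying the manifests, the add-on pods and the CoreDNS service IP can be verified:

kubectl get pods -n kube-system -o wide
kubectl get svc -n kube-system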

 

 

 

  34. Deploy nginx to test DNS

vi nginx.yaml

 

apiVersion: v1

kind: Pod

metadata:

  name: nginx-server

  labels:

      app: nginx    

spec:

  containers:

  - name: nginx

    image: nginx:1.17.2-alpine

 

 

vi nginx-service.yaml

 

 

apiVersion: v1

kind: Service

metadata:

  name: web

spec:

  ports:

    - port: 7878

      targetPort: 80

      protocol: TCP

      name: web80

  selector:

    app: nginx

 

 

kubectl apply -f nginx.yaml

kubectl apply -f nginx-service.yaml

 

vi alpine.yaml

 

apiVersion: v1

kind: Pod

metadata:

  name: client

spec:

  containers:

  - name: client

    image: alpine:3.8

    command:

    - sleep

    - "360000"

  

kubectl apply -f alpine.yaml

  35. Test DNS resolution from the client pod

kubectl exec -it client -- /bin/sh

 

nslookup web.default.svc.cluster.local

 

 

 

ping 172.20.101.4

ping 172.20.43.4

ping 172.17.93.187 

ping 172.17.93.188

  36. Install the Tomcat image

Upload tomcat-jre1.8-8.5.23.tar

 

docker load < tomcat-jre1.8-8.5.23.tar

// Re-tag (rename) for the private registry

docker tag local_registry.com/alpine/jre8/tomcat:8.5.23 local-registry.com/alpine/jre8/tomcat:8.5.23

// Push to the private registry

docker push local-registry.com/alpine/jre8/tomcat:8.5.23

 

// Create the pod manifest

vi tomcat8.yaml

 

apiVersion: v1

kind: Pod

metadata:

  name: tomcat8-server

  labels:

      app: tomcat8    

spec:

  containers:

  - name: tomcat8

    image: local-registry.com/alpine/jre8/tomcat:8.5.23   # matches the tag pushed above

    ports:

    - containerPort: 8080

    securityContext:

      privileged: true

 

 

vi tomcat8-service.yaml

 

apiVersion: v1

kind: Service

metadata:

  name: tomcat8-web

spec:

  type: NodePort

  ports:

    - port: 8888

      targetPort: 8080

      protocol: TCP

      name: tomcat8web

      nodePort: 9999   # must fall inside the API server's NodePort range (default 30000-32767) or the Service will be rejected

  selector:

    app: tomcat8

 

 

kubectl apply -f tomcat8.yaml

kubectl apply -f tomcat8-service.yaml
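
The NodePort actually assigned to the service (referenced later in the HAProxy backends) can be read back after creation:

kubectl get svc tomcat8-web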

 

  37. Install Docker on the proxy server

 

Install Docker on the new server

cd

cp /root/docker/* /usr/local/bin

 

// Edit the docker.service unit file

vi /lib/systemd/system/docker.service

// Unit file contents:

 

[Unit]

Description=Docker Application Container Engine

Documentation=https://docs.docker.com

After=docker.socket

Requires=docker.socket

 

[Service]

Type=notify

# the default is not to use systemd for cgroups because the delegate issues still

# exists and systemd currently does not support the cgroup feature set required

# for containers run by docker

EnvironmentFile=-/etc/default/docker

ExecStart=/usr/local/bin/dockerd --storage-driver=overlay -H fd:// $DOCKER_OPTS $DOCKER_OPT_BIP $DOCKER_OPT_MTU $DOCKER_OPT_IPMASQ

ExecReload=/bin/kill -s HUP $MAINPID

# Having non-zero Limit*s causes performance problems due to accounting overhead

# in the kernel. We recommend using cgroups to do container-local accounting.

LimitNOFILE=infinity

LimitNPROC=infinity

LimitCORE=infinity

# Uncomment TasksMax if your systemd version supports it.

# Only systemd 226 and above support this version.

TasksMax=infinity

TimeoutStartSec=0

# set delegate yes so that systemd does not reset the cgroups of docker containers

Delegate=yes

# kill only the docker process, not all processes in the cgroup

KillMode=process

 

[Install]

WantedBy=multi-user.target

 

// Edit the docker.socket unit file

vi /lib/systemd/system/docker.socket

 

[Unit]

Description=Docker Socket for the API

PartOf=docker.service

 

[Socket]

ListenStream=/var/run/docker.sock

SocketMode=0660

SocketUser=root

SocketGroup=root

 

[Install]

WantedBy=sockets.target

 

// Docker daemon options

vi /etc/default/docker

 

DOCKER_OPTS="--selinux-enabled --insecure-registry local-registry.com"

 

  38. Configure HAProxy

Upload haproxy-zone-reload.1.7.9-alpine.tar

 

docker load < haproxy-zone-reload.1.7.9-alpine.tar

Configuration

 

Run HAProxy with Docker

 

mkdir -p /opt/haproxy/config

 

vi /opt/haproxy/config/haproxy.cfg

 

// Configuration file

global

    daemon

    maxconn 65536

    nbproc 1

    pidfile /var/run/haproxy.pid

    spread-checks 10

    log 127.0.0.1 local3 info

 

defaults

    log global

    mode http

    option tcpka

    option clitcpka

    maxconn 65536

    retries 3

    balance roundrobin

    timeout connect 10000ms

    timeout client 50000ms

    timeout server 50000ms

 

listen tomcat-service-9999

    bind 172.17.93.189:9999

    server server1 172.17.93.186:32322 check port 32322 inter 2000 rise 2 fall 2

    server server2 172.17.93.187:32322 check port 32322 inter 2000 rise 2 fall 2

    server server3 172.17.93.188:32322 check port 32322 inter 2000 rise 2 fall 2

 

global

    daemon

    maxconn 65536

    nbproc 1

    pidfile /var/run/haproxy.pid

    spread-checks 10

    log 127.0.0.1 local3 info

 

defaults

    log global

    mode http

    option tcpka

    option clitcpka

    maxconn 65536

    retries 3

    balance roundrobin

    timeout connect 10000ms

    timeout client 50000ms

    timeout server 50000ms

 

listen app-service-8010

    bind 101.201.110.146:8010

    server server1 10.26.67.158:31667 check port 31667 inter 2000 rise 2 fall 2

    server server2 10.26.78.45:31667 check port 31667 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:31667 check port 31667 inter 2000 rise 2 fall 2

 

listen msql-3306

    bind 101.201.110.146:3306

    mode tcp

    server server1 10.26.78.45:30949 check

    server server2 10.47.213.62:30949 check

    server server3 10.25.198.160:30949 check

 

listen msql-3307

    bind 101.201.110.146:3307

    mode tcp

    server server1 10.26.78.45:31107 check

    server server2 10.47.213.62:31107 check

    server server3 10.25.198.160:31107 check

 

listen cassandra-9042

    bind 101.201.110.146:9042

    mode tcp

    server server1 10.25.245.253:9042 check

 

listen hard-38080

    bind 101.201.110.146:38080

    mode tcp

    server server1 10.26.67.158:30907 check

    server server2 10.26.78.45:30907 check

    server server3 10.25.198.160:30907 check

 

listen hard-38081

    bind 101.201.110.146:38081

    mode tcp

    server server1 10.26.67.158:30919 check

    server server2 10.26.78.45:30919 check

    server server3 10.25.198.160:30919 check

 

listen monitoringplat-8080

    bind 101.201.110.146:8080

    mode http

    server server1 10.26.67.158:30479 check port 30479 inter 2000 rise 2 fall 2

    server server2 10.26.78.45:30479 check port 30479 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30479 check port 30479 inter 2000 rise 2 fall 2

 

frontend web_80

    bind 101.201.110.146:80

    mode http

    acl is_monitoringplat hdr_beg(host) -i gis.rtzltech.cn

    acl is_monitoringplat hdr_beg(host) -i gis.cloud.rtzltech.cn

    acl is_monitoringplat hdr_beg(host) -i www.zhys10000.com

    acl is_monitoringplat hdr_beg(host) -i zhys10000.com

    acl is_efservice hdr_beg(host) -i cnly.rtzltech.cn

 

    use_backend monitoringplat-80 if is_monitoringplat

    use_backend efservice-80 if is_efservice

 

backend monitoringplat-80

    mode http

    server server1 10.26.67.158:30479 check port 30479 inter 2000 rise 2 fall 2

    server server2 10.26.78.45:30479 check port 30479 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30479 check port 30479 inter 2000 rise 2 fall 2

 

backend efservice-80

    mode http

    server server1 10.26.67.158:31248 check port 31248 inter 2000 rise 2 fall 2

    server server2 10.26.78.45:31248 check port 31248 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:31248 check port 31248 inter 2000 rise 2 fall 2

 

 

 

listen zhihuitransapi-8081

    bind 101.201.110.146:8081

    server server1 10.26.67.158:30754 check port 30754 inter 2000 rise 2 fall 2

    server server2 10.26.78.45:30754 check port 30754 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30754 check port 30754 inter 2000 rise 2 fall 2

 

 

listen gps-trail-8090

    bind 101.201.110.146:8090

    server server1 10.26.67.158:32443 check port 32443 inter 2000 rise 2 fall 2

    server server2 10.26.78.45:32443 check port 32443 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:32443 check port 32443 inter 2000 rise 2 fall 2

 

listen zhihuitransplat-8088

    bind 101.201.110.146:8088

    server server1 10.26.67.158:30178 check port 30178 inter 2000 rise 2 fall 2

    server server2 10.47.213.62:30178 check port 30178 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30178 check port 30178 inter 2000 rise 2 fall 2

 

listen user-web-8086

    bind 101.201.110.146:8086

    mode http

    server server1 10.46.178.141:30132 check port 30132 inter 2000 rise 2 fall 2

    server server2 10.47.213.62:30132 check port 30132 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30132 check port 30132 inter 2000 rise 2 fall 2

 

listen cdcwms-8091

    bind 101.201.110.146:8091

    mode http

    server server1 10.26.67.158:30940 check port 30940 inter 2000 rise 2 fall 2

    server server2 10.47.213.62:30940 check port 30940 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30940 check port 30940 inter 2000 rise 2 fall 2

 

listen cdccmc-8092

    bind 101.201.110.146:8092

    mode http

    server server1 10.26.67.158:31259 check port 31259 inter 2000 rise 2 fall 2

    server server2 10.47.213.62:31259 check port 31259 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:31259 check port 31259 inter 2000 rise 2 fall 2

 

listen cdcobd-8093

    bind 101.201.110.146:8093

    mode http

    server server1 10.26.67.158:30484 check port 30484 inter 2000 rise 2 fall 2

    server server2 10.47.213.62:30484 check port 30484 inter 2000 rise 2 fall 2

    server server3 10.25.198.160:30484 check port 30484 inter 2000 rise 2 fall 2

 

 

 

Start HAProxy

 

docker run -d --name my-running-haproxy --network=host -v /opt/haproxy/config:/usr/local/etc/haproxy:ro --restart=always  local-registry.com/haproxy:1.7.9-alpine

 

// SSL / HTTPS instance

docker run -d --name myssl-running-haproxy --network=host -v /opt/haproxy/configssl:/usr/local/etc/haproxy:rw --restart=always  local-registry.com/haproxy:1.7.9-alpine

 

 

Reloading config

 

docker kill -s HUP my-running-haproxy
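
Before a start or reload, the edited configuration can be syntax-checked inside the same image, assuming it keeps the standard haproxy entrypoint (haproxy -c is the stock check mode):

docker run --rm -v /opt/haproxy/config:/usr/local/etc/haproxy:ro local-registry.com/haproxy:1.7.9-alpine haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg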

// Tested and confirmed to start

docker run -d --name my-running-haproxy --network=host -v /opt/haproxy/config:/usr/local/etc/haproxy:ro --restart=always  local_registry.com/haproxy:1.7.9-alpine

 

docker run -d --name myssl-running-haproxy --network=host -v /opt/haproxy/configssl:/usr/local/etc/haproxy:rw --restart=always  local_registry.com/haproxy:1.7.9-alpine

  39. Install MariaDB

Upload mariadb_10.1.18.tar

 

docker load < mariadb_10.1.18.tar

 

docker push local-registry.com/mariadb:10.1.18

 

// Create the directories

mkdir -p /diskmap/mariadb3306/config

mkdir -p /diskmap/mariadb3306/data

 

Upload the configuration files to the config directory

 

 

// Edit the static pod manifest

vi /etc/kubernetes/manifests/mariadb-galera-pod-3306.yaml

 

apiVersion: v1

kind: Pod

metadata:

  name: mariadb-galera-3306-v10.1.18

  namespace: default

spec:

  hostNetwork: true

  containers:

    - name: mariadb-3306

      image: local-registry.com/mariadb:10.1.18

      env:

      - name: MYSQL_ROOT_PASSWORD

        value: handinfo

      - name: MYSQL_CONFIG_DIR

        value: "/config"

      ports:

      - containerPort: 3306

        protocol: TCP

      securityContext:

        privileged: true

      volumeMounts:

      - name: mariadb-config

        mountPath: /config

      - name: mariadb-data

        mountPath: /var/lib/mysql

  volumes:

  - name: mariadb-config

    hostPath:

      path: "/diskmap/mariadb3306/config"

  - name: mariadb-data

    hostPath:

      path: "/diskmap/mariadb3306/data"

 

The bind address must be changed to match the node's own IP

vi /diskmap/mariadb3306/config/my.cnf.d/server.cnf

 

[client]

port            = 3306

socket          = /var/run/mysqld/mysqld.sock

 

[mysqld_safe]

socket          = /var/run/mysqld/mysqld.sock

nice            = 0

 

[mysqld]

skip-host-cache

skip-name-resolve

 

#

# * Basic Settings

#

user            = mysql

pid-file        = /var/run/mysqld/mysqld.pid

socket          = /var/run/mysqld/mysqld.sock

port            = 3306

basedir         = /usr

datadir         = /var/lib/mysql

tmpdir          = /tmp

lc_messages_dir = /usr/share/mysql

lc_messages     = en_US

skip-external-locking

lower_case_table_names = 1

 

#

# Instead of skip-networking the default is now to listen only on

# localhost which is more compatible and is not less secure.

bind-address           = 172.17.93.187

 

# * Fine Tuning

#

max_connections         = 10000

connect_timeout         = 60

wait_timeout            = 1800

max_allowed_packet      = 256M

thread_cache_size       = 128

sort_buffer_size        = 8M

bulk_insert_buffer_size = 64M

tmp_table_size          = 4096M

max_heap_table_size     = 10240M

 

read_buffer_size=8M

key_buffer_size=2048M

read_rnd_buffer_size=8M

 

#

# * Query Cache Configuration

#

# Cache only tiny result sets, so we can fit more in the query cache.

query_cache_limit               = 1M

query_cache_size                = 256M

 

 

slow_query_log_file     = /var/log/mysql/mariadb-slow.log

long_query_time = 3

slow_query_log = ON

 

expire_logs_days        = 10

max_binlog_size         = 100M

 

#log-bin = mysql-bin

#innodb-file-per-table = ON

#sync_binlog = 1

#sync_master_info = 1

#log-bin-trust-function-creators=1

 

default_storage_engine  = InnoDB

 

# you can't just change log file size, requires special procedure

innodb_log_file_size    = 512M

innodb_buffer_pool_size = 32G

#innodb_buffer_pool_instances = 16

innodb_log_buffer_size  = 32M

innodb_file_per_table   = 1

innodb_open_files       = 10240

innodb_io_capacity      = 800

innodb_flush_method     = O_DIRECT

innodb_data_file_path = ibdata1:1G:autoextend

innodb_log_files_in_group=3

innodb_flush_log_at_trx_commit=2

innodb_thread_concurrency = 32

innodb_write_io_threads = 8

innodb_read_io_threads = 8

innodb_lock_wait_timeout = 120

innodb_autoinc_lock_mode=2

#innodb_additional_mem_pool_size=128M

 

 

server-id               = 1

#

# * Galera-related settings

#

[galera]

#binlog_format=row

#innodb_autoinc_lock_mode=2

 

#wsrep_provider_options="base_port=4567"

 

#wsrep_provider_options="pc.bootstrap=true"

#

# Allow server to accept connections on all interfaces.

#

#bind-address=172.17.93.187

#

# Optional setting

#wsrep_slave_threads=6

#wsrep_convert_LOCK_to_trx=0

#wsrep_retry_autocommit=1

#wsrep_auto_increment_control=1

#wsrep_certify_nonPK=1

#wsrep_max_ws_rows=131072

#wsrep_max_ws_size=1073741824

 

 

[mysqldump]

quick

quote-names

max_allowed_packet      = 16M

 

[mysql]

#no-auto-rehash # faster start of mysql but no tab completion

 

[isamchk]

key_buffer              = 1M
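
Once the kubelet on that node picks up the static pod manifest, the database can be checked from any machine with a MySQL client installed; a hedged sketch (the client itself is not part of these steps):

kubectl get pods | grep mariadb
mysql -h 172.17.93.187 -P 3306 -u root -p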

  40. Traefik maintenance

Mainly maintain cheese-ingress.yaml

 


 

 
