There is no need to explain what K8S (Kubernetes) is for; if you found this article, you already have some idea of what it does, so I will not go into the individual components here.

I. Prepare two machines (resources are limited, so two will have to do)

     10.211.55.34 (master)

     10.211.55.35 (node)

II. Set up the servers (apply these steps on both machines)

     Disable the firewall

#> systemctl disable firewalld.service
#> systemctl stop firewalld.service

    Disable SELinux

#> setenforce 0
#> vi /etc/selinux/config
   After opening the file, set:
   SELINUX=disabled
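
The SELinux state can be confirmed with getenforce; it should report Permissive right after setenforce 0, and Disabled after a reboot with the config above.

#> getenforce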

III. Install kubernetes and etcd

      1 /  On 10.211.55.34 (master), docker is not installed; only kubernetes-master and etcd are needed

#> yum -y install kubernetes-master etcd

     2 /  On 10.211.55.35 (node), install kubernetes-node, etcd, flannel and docker

#> yum install *rhsm* -y
#> yum -y install kubernetes-node etcd flannel docker
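
To double-check that the packages actually landed, a quick query (package names as installed above):

#> rpm -q kubernetes-node etcd flannel docker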

IV. Configure etcd (etcd is a standalone component; k8s simply uses it as its internal datastore)

     After configuring etcd, start it in order: start the node first, then the master.

    1 /  Configure etcd on the node

[root@k8s-n ~]# vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
#URLs this member listens on for peer (cluster-internal) traffic; listen on the node's IP so the master can reach it
ETCD_LISTEN_PEER_URLS="http://10.211.55.35:2380"
#URLs this member listens on for client traffic
ETCD_LISTEN_CLIENT_URLS="http://10.211.55.35:2379,http://localhost:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#Name of this etcd member; must be unique within the cluster
ETCD_NAME="etcd2"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
#Peer URL advertised to the other cluster members
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.211.55.35:2380"
#Client URL advertised for other machines to reach this member
ETCD_ADVERTISE_CLIENT_URLS="http://10.211.55.35:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
#Initial list of cluster members
ETCD_INITIAL_CLUSTER="etcd1=http://10.211.55.34:2380,etcd2=http://10.211.55.35:2380"
#The token must match the one configured on the master
ETCD_INITIAL_CLUSTER_TOKEN="etcd-k8s"
ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"


  2 / Configure etcd on the master

[root@k8s-m ~]# vi /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://10.211.55.34:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.211.55.34:2379,http://127.0.0.1:2379"
ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="etcd1"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.211.55.34:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.211.55.34:2379,http://127.0.0.1:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="etcd1=http://10.211.55.34:2380,etcd2=http://10.211.55.35:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-k8s"
ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
#[Proxy]
#ETCD_PROXY="off"
#ETCD_PROXY_FAILURE_WAIT="5000"
#ETCD_PROXY_REFRESH_INTERVAL="30000"
#ETCD_PROXY_DIAL_TIMEOUT="1000"
#ETCD_PROXY_WRITE_TIMEOUT="5000"
#ETCD_PROXY_READ_TIMEOUT="0"
#
#[Security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_AUTO_TLS="false"
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#ETCD_PEER_AUTO_TLS="false"
#
#[Logging]
#ETCD_DEBUG="false"
#ETCD_LOG_PACKAGE_LEVELS=""
#ETCD_LOG_OUTPUT="default"
#
#[Unsafe]
#ETCD_FORCE_NEW_CLUSTER="false"
#
#[Version]
#ETCD_VERSION="false"
#ETCD_AUTO_COMPACTION_RETENTION="0"
#
#[Profiling]
#ETCD_ENABLE_PPROF="false"
#ETCD_METRICS="basic"
#
#[Auth]
#ETCD_AUTH_TOKEN="simple"

Start etcd (note: start the node first, then the master)

#> systemctl start etcd.service    (start)
#> systemctl status etcd.service   (check status)

Start etcd on the master the same way, and check its status once it is up.
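
To verify that the two members actually formed one cluster, etcdctl can be queried from either machine; the endpoint below assumes the master's client URL configured above.

#> etcdctl --endpoints http://10.211.55.34:2379 member list
#> etcdctl --endpoints http://10.211.55.34:2379 cluster-health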

V. Configure kubernetes and flanneld (on the node)

   

[root@k8s-n ~]# vi /etc/kubernetes/config 
###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service
# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
# Note: change this to the master's address
KUBE_MASTER="--master=http://10.211.55.34:8080" 
[root@k8s-n ~]# vi /etc/kubernetes/kubelet
###
# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS="--address=127.0.0.1"

# The port for the info server to serve on
# KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname
KUBELET_HOSTNAME="--hostname-override=10.211.55.35"

# location of the api-server
#Change this to the master's API server address
KUBELET_API_SERVER="--api-servers=http://10.211.55.34:8080"

# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!
KUBELET_ARGS=""
[root@k8s-n ~]# vi /etc/sysconfig/flanneld
# Flanneld configuration options

# etcd url location.  Point this to the server where etcd runs
# Change this to the master's address (where etcd is reachable)
FLANNEL_ETCD_ENDPOINTS="http://10.211.55.34:2379"

# etcd config key.  This is the configuration key that flannel queries
# For address range assignment
FLANNEL_ETCD_PREFIX="/atomic.io/network"

# Any additional options that you want to pass
#FLANNEL_OPTIONS=""
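
Flanneld reads its network range from etcd under the prefix configured above, so that key normally has to exist before flanneld can start. A minimal sketch, run once against the master's etcd (the 172.16.0.0/16 range is only an example; pick one that does not collide with your network):

#> etcdctl --endpoints http://10.211.55.34:2379 set /atomic.io/network/config '{ "Network": "172.16.0.0/16" }'

On the node, flanneld should then be started before docker so docker picks up the flannel subnet:

#> systemctl start flanneld.service
#> systemctl enable flanneld.service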

  Start the services on the node

#> service docker start
#> systemctl start kubelet.service
#> systemctl start kube-proxy.service
#> systemctl enable kubelet.service
#> systemctl enable kube-proxy.service
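
On the master side, the kubernetes-master package normally ships /etc/kubernetes/apiserver; if the master services are not running yet, a minimal sketch is to point the apiserver at etcd and start the three master daemons. The variable names below are the package defaults and the service CIDR is just an assumption; adjust them to your environment.

[root@k8s-m ~]# vi /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://10.211.55.34:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
[root@k8s-m ~]# systemctl start kube-apiserver kube-controller-manager kube-scheduler
[root@k8s-m ~]# systemctl enable kube-apiserver kube-controller-manager kube-scheduler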

 Check the status on the master (it works)
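
A quick way to do that from the master is to list the component statuses and make sure scheduler, controller-manager and etcd all report Healthy:

[root@k8s-m ~]# kubectl get componentstatuses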

PS: docker may report an error when it is first started. If it does, edit /etc/sysconfig/docker and adjust the OPTIONS line; a common culprit once SELinux has been disabled is the --selinux-enabled flag, which can be changed to --selinux-enabled=false.

OK, the installation is done. Check the registered nodes (just the one, sadly).
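
Listing the nodes from the master should show 10.211.55.35 in Ready state:

[root@k8s-m ~]# kubectl get nodes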

Check what has been written into the database (etcd)
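
With the v2 etcdctl API the keys written by kubernetes and flannel can be browsed directly; the /registry prefix is the kubernetes default storage prefix.

#> etcdctl --endpoints http://10.211.55.34:2379 ls /
#> etcdctl --endpoints http://10.211.55.34:2379 ls --recursive /atomic.io/network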

VI. Test

On the node, pull the pod-infrastructure image and a test image

[root@k8s-n ~]# docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
[root@k8s-n ~]# docker pull  nginx

The pod-infrastructure pull may fail with an error about a missing /etc/rhsm/ca/redhat-uep.pem certificate.

If it does, run the following commands and then retry the pull:

[root@k8s-n ~]# wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
[root@k8s-n ~]# rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem

Start a test deployment from the master:

kubectl run my-nginx --image=nginx --replicas=2 --port=80
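
kubectl run with --replicas creates a deployment in this kubernetes version, so the result can be checked from the master with:

kubectl get deployment my-nginx
kubectl get pods -o wide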
