1, Cluster network planning

Node name | NICs            | IP
master    | host-only, NAT  | 192.168.56.181
slave     | host-only, NAT  | 192.168.56.182
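
Later steps refer to the nodes by hostname ("server master" in ntp.conf, "wget slave:31103"), so both names should resolve on both machines. A minimal /etc/hosts sketch, assuming no DNS is available:

cat >> /etc/hosts <<EOF
192.168.56.181   master
192.168.56.182   slave
EOF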

2, Installing k8s with yum

master node

yum -y install epel-release
yum update
yum -y install  etcd kubernetes-master ntp flannel

############## Configuration: cluster time synchronization
ln -sfT /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cat >> /etc/ntp.conf<<EOF
## allow hosts on the 192.168.56.0/24 subnet to sync time from this server; deny modify and trap access
restrict 192.168.56.0   mask  255.255.255.0 nomodify notrap

## use the local clock so this host can act as the time source itself
server 127.127.1.0
fudge 127.127.1.0  stratum 10
EOF
service ntpd restart 
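
A quick check that ntpd came up and is using the local clock as its source (the LOCAL(0) peer should appear in the list):

# list the peers ntpd is using; systemctl confirms the service is active
ntpq -p
systemctl status ntpd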

slave node

yum -y install epel-release
yum update
yum -y install  kubernetes-node ntp flannel docker

############## Configuration: cluster time synchronization
ln -sfT /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cat >> /etc/ntp.conf<<EOF
server  master   # the master node (192.168.56.181)
EOF
service ntpd restart 
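
After a few minutes the master should show up as a peer on the slave; a simple check:

# the "master" peer should appear with a non-zero "reach" value once sync works
ntpq -p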

3, Modify the k8s configuration

master node

#1, etcd service
cat >/etc/etcd/etcd.conf <<EOF
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://localhost:2379,http://192.168.56.181:2379"
ETCD_NAME="default"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.56.181:2379"
EOF

#2, kubernetes: config
cat >  /etc/kubernetes/config  <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.56.181:8080"
EOF

#3, kubernetes: apiserver
cat >  /etc/kubernetes/apiserver <<EOF
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.56.181:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=AlwaysAdmit"
KUBE_API_ARGS=""
EOF

#4, kubernetes: controller-manager
cat >  /etc/kubernetes/controller-manager  <<EOF
KUBE_CONTROLLER_MANAGER_ARGS=""
EOF

#5, kubernetes: scheduler
cat > /etc/kubernetes/scheduler  <<EOF
KUBE_SCHEDULER_ARGS="--address=0.0.0.0"
EOF

#6, start all services
for x  in  etcd kube-apiserver kube-controller-manager kube-scheduler
do
         systemctl enable  $x ;
         systemctl start  $x ;
done
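
Before configuring the network it is worth confirming that etcd and the control-plane components actually came up; a quick check on the master (etcd v2 / kubernetes 1.5-era commands):

# etcd should report a healthy single-member cluster
etcdctl cluster-health

# scheduler, controller-manager and etcd-0 should all report Healthy
kubectl get componentstatuses

# 8080 = apiserver insecure port, 2379 = etcd client port
netstat -lntp | grep -E ':8080|:2379'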

#7, configure the flanneld network
etcdctl set /atomic.io/network/config '{"Network": "172.16.0.0/16"}'
cat >  /etc/sysconfig/flanneld  <<EOF
FLANNEL_ETCD_ENDPOINTS="http://192.168.56.181:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
FLANNEL_OPTIONS=""
EOF

#8, start flanneld
systemctl enable  flanneld ;
systemctl restart  flanneld;
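
If flanneld started cleanly it writes its subnet lease to /run/flannel/subnet.env and brings up a flannel interface (flannel0 with the default udp backend); a quick check:

# the leased subnet should fall inside the 172.16.0.0/16 range configured in etcd
cat /run/flannel/subnet.env
ip addr show flannel0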

slave node

#1, configure the flanneld network (the /atomic.io/network/config key was already written to etcd on the master in step 7, so only the local flanneld config is needed here)
cat >  /etc/sysconfig/flanneld  <<EOF
FLANNEL_ETCD_ENDPOINTS="http://192.168.56.181:2379"
FLANNEL_ETCD_PREFIX="/atomic.io/network"
FLANNEL_OPTIONS=""
EOF

# start flanneld
systemctl enable  flanneld ;
systemctl start  flanneld;

#2, kubernetes: config
cat >/etc/kubernetes/config  <<EOF
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.56.181:8080"
EOF

#3, kubernetes: proxy
cat >/etc/kubernetes/proxy <<EOF
KUBE_PROXY_ARGS="--bind-address=0.0.0.0"
EOF

#4, kubernetes: kubelet
cat > /etc/kubernetes/kubelet <<EOF
KUBELET_ADDRESS="--address=127.0.0.1"
KUBELET_HOSTNAME="--hostname-override=192.168.56.182"
KUBELET_API_SERVER="--api-servers=http://192.168.56.181:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
EOF

#5, configure docker: use the aliyun registry mirror and cgroupfs as the cgroup driver
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://yywkvob3.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=cgroupfs"]
}
EOF
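
A malformed daemon.json will keep docker from starting at all, so it is worth validating the file before restarting the services; one way, using the python 2 that ships with CentOS 7:

# prints the parsed JSON on success, or a parse error if the file is invalid
python -m json.tool /etc/docker/daemon.json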

#6, start all k8s services
for  x   in  docker  flanneld kube-proxy kubelet
do
   systemctl  enable  $x
   systemctl restart  $x
done
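
A quick sanity check on the slave confirms that all four services are active and that docker picked up the mirror and cgroup driver settings:

for x in docker flanneld kube-proxy kubelet; do systemctl is-active $x; done

# the aliyun mirror and "Cgroup Driver: cgroupfs" should both show up
docker info | grep -i -A1 'registry mirrors'
docker info | grep -i 'cgroup driver'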

4, Verify the flanneld network and create an nginx pod

#1, verify flanneld
[root@master network-scripts]# etcdctl ls /
/registry
/atomic.io
[root@master network-scripts]# etcdctl ls /atomic.io
/atomic.io/network
[root@master network-scripts]# etcdctl ls /atomic.io/network
/atomic.io/network/config
/atomic.io/network/subnets
[root@master network-scripts]# etcdctl ls /atomic.io/network/subnets
/atomic.io/network/subnets/172.16.10.0-24
/atomic.io/network/subnets/172.16.68.0-24

[root@master network-scripts]# etcdctl get /atomic.io/network/subnets/172.16.10.0-24
{"PublicIP":"192.168.56.182"}
[root@master network-scripts]# etcdctl get /atomic.io/network/subnets/172.16.68.0-24
{"PublicIP":"192.168.56.181"}

#2, create the nginx pod
[root@master network-scripts]# kubectl  get nodes
NAME             STATUS    AGE
192.168.56.182   Ready     2h

[root@master network-scripts]# kubectl run nginx --image=nginx --port=80  --replicas=1
deployment "nginx" created
[root@master network-scripts]# kubectl get pods
NAME                     READY     STATUS              RESTARTS   AGE
nginx-1992866346-hn30z    0/1       ContainerCreating   0          5m

############################### Troubleshooting ##################################
kubectl describe pods
-------------------------------------------

QoS Class:	BestEffort
Tolerations:	<none>
Events:
  FirstSeen	LastSeen	Count	From				SubObjectPath	Type		Reason		Message
  ---------	--------	-----	----				-------------	--------	------		-------
  3m		3m		1	{default-scheduler }				Normal		Scheduled	Successfully assigned nginx-3449338310-0hkck to 192.168.56.182
  2m		53s		3	{kubelet 192.168.56.182}			Warning		FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ErrImagePull: "image pull failed for registry.access.redhat.com/rhel7/pod-infrastructure:latest, this may be because there are no credentials on this request.  details: (open /etc/docker/certs.d/registry.access.redhat.com/redhat-ca.crt: no such file or directory)"

  1m	14s	4	{kubelet 192.168.56.182}		Warning	FailedSync	Error syncing pod, skipping: failed to "StartContainer" for "POD" with ImagePullBackOff: "Back-off pulling image \"registry.access.redhat.com/rhel7/pod-infrastructure:latest\""

################################ Fix (run on the slave node) ################################
yum -y  install *rhsm*
wget http://mirror.centos.org/centos/7/os/x86_64/Packages/python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm
rpm2cpio python-rhsm-certificates-1.19.10-1.el7_4.x86_64.rpm | cpio -iv --to-stdout ./etc/rhsm/ca/redhat-uep.pem | tee /etc/rhsm/ca/redhat-uep.pem    
docker pull registry.access.redhat.com/rhel7/pod-infrastructure:latest
docker pull nginx

# check the pod status again
[root@master network-scripts]# kubectl get pods
NAME                     READY     STATUS    RESTARTS   AGE
nginx-1992866346-hn30z   1/1       Running   0          36m
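
Adding -o wide shows which node the pod landed on and confirms it got an address from the flannel range rather than the default docker bridge:

# the pod IP should fall inside 172.16.10.0/24, the slave's flannel subnet
kubectl get pods -o wide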

#3, expose the nginx pod as a service
[root@master network-scripts]# kubectl expose deployment nginx --port=80 --type=LoadBalancer
service "nginx" exposed

[root@master network-scripts]# kubectl get services
NAME         CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   10.254.0.1      <none>        443/TCP        2h
nginx        10.254.157.35   <pending>     80:31103/TCP   7s

[root@master network-scripts]# kubectl describe service nginx
Name:			nginx
Namespace:		default
Labels:			run=nginx
Selector:		run=nginx
Type:			LoadBalancer
IP:			10.254.157.35
Port:			<unset>	80/TCP
NodePort:		<unset>	31103/TCP
Endpoints:		172.16.10.2:80
Session Affinity:	None
No events.
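
Before trying the NodePort, the ClusterIP path through kube-proxy can be checked directly from the slave node (the ClusterIP is only reachable from cluster nodes, not from the host machine):

# from the slave: the nginx welcome page should come back via the service IP
curl http://10.254.157.35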

# After a short wait, on the slave node: verify that the NodePort exposed by the nginx service is listening
[root@slave ~]# netstat -anop |grep 31103
tcp6       0      0 :::31103                :::*                    LISTEN      3279/kube-proxy      off (0.00/0/0)

[root@slave ~]# wget slave:31103
--2019-07-24 01:39:32--  http://slave:31103/
Resolving slave (slave)... 192.168.56.182
Connecting to slave (slave)|192.168.56.182|:31103... connected.
HTTP request sent, awaiting response... 200 OK
Length: 612 [text/html]
Saving to: 'index.html'

100%[====================================================================================================>] 612         --.-K/s   in 0s

2019-07-24 01:39:32 (23.7 MB/s) - 'index.html' saved [612/612]