kubernetes+docker
本文介绍在 CentOS 7 上用 kubeadm 部署 Kubernetes + Docker 集群,环境与组件说明见下文。
centos7_x64 内存4G+ cpu2核
k8s_server 192.168.10.15 k8s1
k8s_client 192.168.10.14 k8s2
Master(管理节点)
API Server:供Kubernetes API接口,主要处理 Rest操作以及更新Etcd中的对象。所有资源增删改查的唯一入口。
Scheduler:绑定Pod到Node上,资源调度。
Controller Manager: 所有其他群集级别的功能,目前由控制器Manager执行。资源对象的 自动化控制中心。
Etcd:所有持久化的状态信息存储在Etcd中
Node(计算节点)
Kubelet:管理Pods以及容器、镜像、Volume等,实现对集群 对节点的管理
Kube-proxy:提供网络代理以及负载均衡,实现与Service通讯
kubeadm:引导集群的命令
kubelet:集群中运行任务的代理程序
kubectl:命令行管理工具
实验软件
kube-flannel.yml
recommended.yaml
软件安装
cp -pv /etc/sysctl.conf /etc/sysctl.conf.bak k8s1/k8s2操作
echo net.ipv4.tcp_syncookies = 1 >> /etc/sysctl.conf
echo net.ipv4.tcp_tw_reuse = 1 >> /etc/sysctl.conf
echo net.ipv4.tcp_tw_recycle = 1 >> /etc/sysctl.conf
echo net.ipv4.tcp_fin_timeout = 30 >> /etc/sysctl.conf
echo "vm.max_map_count=262144" >> /etc/sysctl.conf
sysctl -p
hostnamectl set-hostname k8s1/k8s2 操作
cp -pv /etc/hosts /etc/hosts.bak
echo 192.168.10.15 k8s1 >> /etc/hosts
echo 192.168.10.14 k8s2 >> /etc/hosts
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux && setenforce 0 k8s1/k8s2操作
systemctl stop firewalld && systemctl disable firewalld
systemctl start ntpd && systemctl enable ntpd
ntpdate ntp.aliyun.com && hwclock -w
swapoff -a && cp -pv /etc/fstab /etc/fstab.bak k8s1/k8s2操作
sed -i 's/UUID=b34/#UUID=b34/g' /etc/fstab
modprobe br_netfilter k8s1/k8s2操作
sh -c 'echo "br_netfilter" > /etc/modules-load.d/br_netfilter.conf'
lsmod|grep br_netfilter
br_netfilter 22248 0
bridge 107106 1 br_netfilter
touch /etc/sysctl.d/k8s.conf
echo net.bridge.bridge-nf-call-ip6tables = 1 >> /etc/sysctl.d/k8s.conf
echo net.bridge.bridge-nf-call-iptables = 1 >> /etc/sysctl.d/k8s.conf
echo 1 > /proc/sys/net/ipv4/ip_forward k8s1/k8s2操作
scp -pr /etc/yum.repos.d/{docker-ce.repo,kubernetes.repo} root@192.168.10.14:/etc/yum.repos.d/
yum install -y yum-utils net-tools bridge-utils device-mapper-persistent-data docker-ce-18.06.3.ce
yum install -y kubelet-1.15.0 kubectl-1.15.0 kubeadm-1.15.0 k8s1/k8s2操作 kubectl-1.15.0指定版本下载
cp -pv /usr/lib/systemd/system/docker.service /usr/lib/systemd/system/docker.service.bak
ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd
docker info | grep systemd
Cgroup Driver: systemd
cp -pv /etc/sysconfig/kubelet /etc/sysconfig/kubelet.bak
echo KUBELET_EXTRA_ARGS="--fail-swap-on=false" > /etc/sysconfig/kubelet k8s1/k8s2操作
systemctl daemon-reload && systemctl enable docker && systemctl start docker k8s1/k8s2操作
systemctl daemon-reload && systemctl enable kubelet
docker --version && kubelet --version
Docker version 18.06.1-ce, build e68fc7a
Kubernetes v1.15.0
touch /root/pullk8s.sh && chmod +x /root/pullk8s.sh 拉取镜像 k8s1操作 可选操作
cat /root/pullk8s.sh && sh /root/pullk8s.sh
#!/bin/bash
# Pull the Kubernetes v1.15.0 control-plane images from the Aliyun mirror,
# re-tag them as k8s.gcr.io/* so kubeadm can find them locally, then remove
# the mirror-prefixed tag to avoid duplicates.
set -euo pipefail

# Image name:tag pairs required by kubeadm init for v1.15.0.
images=(
  kube-apiserver:v1.15.0
  kube-controller-manager:v1.15.0
  kube-scheduler:v1.15.0
  kube-proxy:v1.15.0
  etcd:3.3.10
  coredns:1.3.1
  pause:3.1
)

readonly mirror=registry.aliyuncs.com/google_containers

for image in "${images[@]}"; do
  docker pull "${mirror}/${image}"
  docker tag "${mirror}/${image}" "k8s.gcr.io/${image}"
  docker rmi "${mirror}/${image}"
done
kubeadm init \
--apiserver-advertise-address=192.168.10.15 \
--image-repository=registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.15.0 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16 k8s1 服务端初始化
kubeadm reset 重新初始化
kubectl get cs 查看群集状态
kubectl get pod
kubeadm join 192.168.10.15:6443 --token fm5nxe.u0sn7lom0fwk65b7 \
--discovery-token-ca-cert-hash sha256:2e833f2f489a0a6029548566f5155db899944de860c901d4aa6ce792f836af93 生成群集令牌环
kubeadm token create --print-join-command 忘记令牌环可以使用此命令
kubeadm join 192.168.10.15:6443 --token xrhyq0.viahks9tnuufsbfy \
--discovery-token-ca-cert-hash sha256:14cae1d7dda1602ea3a8af42b970a6999058b8e1939b8036be71e3533615ebcd
mkdir -p $HOME/.kube && cp -i /etc/kubernetes/admin.conf $HOME/.kube/config k8s1操作
cp -pv /etc/profile /etc/profile.bak k8s1/k8s2操作
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile && source /etc/profile
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s1 NotReady master 4m26s v1.15.0
k8s2 NotReady worker 39s v1.15.0
k8s配置网络
docker pull lizhenliang/flannel:v0.11.0-amd64 k8s1/k8s2操作
docker save lizhenliang/flannel:v0.11.0-amd64 > /root/flannel.tar.gz 导出flannel镜像
docker load < /root/flannel.tar.gz 导入flannel镜像
kubectl apply -f /etc/kubernetes/kube-flannel.yml
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
cat /run/flannel/subnet.env
FLANNEL_NETWORK=10.244.0.0/16
FLANNEL_SUBNET=10.11.0.1/24
FLANNEL_MTU=1450
FLANNEL_IPMASQ=true k8s1/k8s2操作
ip addr | grep docker && ip addr | grep flannel
docker0:
inet 172.17.0.1/16 brd 172.17.255.255 scope global docker0
flannel.1:
inet 10.11.0.0/32 scope global flannel.1 k8s1/k8s2操作
k8s创建pod
kubectl create deployment nginx --image=nginx k8s1操作
kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort
kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx NodePort 10.1.240.181 <none> 80:32418/TCP 19m
kubectl edit svc nginx 修改运行nginx服务
kubectl get pods 查看运行pod
NAME READY STATUS RESTARTS AGE
nginx-554b9c67f9-l78mn 1/1 Running 0 15h
kubectl delete pods nginx-554b9c67f9-l78mn 删除pod
kubectl get deployments 查看运行控制器
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 0/1 1 0 15h
kubectl logs -f pod-name 查看pod日志
kubectl exec -it nginx-554b9c67f9-l78mn -- ls / pod内执行命令
kubectl cp nginx-554b9c67f9-l78mn:/etc/hosts ./hosts 容器拷贝文件到物理机
kubectl cp hosts nginx-554b9c67f9-l78mn:/tmp/hosts 物理机拷贝文件到容器
k8s控制台
cp -pv recommended.yaml recommended.yaml.bak
spec:
type: NodePort 添加配置
ports:
- port: 443
targetPort: 8443
kubectl apply -f recommended.yaml
kubectl get svc -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes-dashboard NodePort 10.1.78.212 <none> 443:30988/TCP 11s
kubectl create serviceaccount dashboard-admin -n kube-system 创建管理用户
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin 绑定为群集管理员
kubectl get secrets -n kube-system | grep dashboard-admin 获取Token
dashboard-admin-token-ktz88
kubectl describe secrets -n kube-system dashboard-admin-token-ktz88
ps aux | grep docker
root 10285 1.5 1.7 485556 50916 ? Ssl 23:31 0:00 /usr/bin/dockerd --exec-opt native.cgroupdriver=systemd
root 10292 1.2 0.8 327152 25224 ? Ssl 23:31 0:00 docker-containerd --config /var/run/docker/containerd/containerd.toml
root 10419 0.0 0.0 112808 968 pts/1 S+ 23:31 0:00 grep --color=auto docker
lsof -i:10250
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
kubelet 11055 root 15u IPv6 112547 0t0 TCP *:10250 (LISTEN)
lsof -i:6443
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
kube-apis 41905 root 7u IPv6 259024 0t0 TCP *:sun-sr-https (LISTEN)
kube-apis 41905 root 80u IPv6 260793 0t0 TCP k8s1:sun-sr-https->k8s1:44312 (ESTABLISHED)
更多推荐
所有评论(0)