K8S Installation Steps
I. Install Docker on every machine
# 1. Install Docker
## 1.1 Remove old Docker versions
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
## 1.2 Install base dependencies
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
## 1.3 Configure the Docker yum repository
sudo yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
## 1.4 Install and start Docker
yum install -y docker-ce-19.03.8 docker-ce-cli-19.03.8 containerd.io
systemctl enable docker
systemctl start docker
## 1.5 Configure a Docker registry mirror
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://t1gbabbr.mirror.aliyuncs.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
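As a quick optional sanity check (not part of the original steps), the following confirms Docker is running and the registry mirror configuration was picked up:
# Verify the Docker daemon and client versions
docker version
# The configured mirror should appear under "Registry Mirrors"
docker info | grep -A1 "Registry Mirrors"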
II. Install kubeadm
1. Prepare the base environment
# Set a unique hostname on each machine
hostnamectl set-hostname xxxx
# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Allow iptables to inspect bridged traffic
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
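An optional check that the module is loaded and the sysctl values took effect:
# br_netfilter should be listed, and both values should print 1
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables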
2. Install kubelet, kubeadm and kubectl
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
   http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
sudo yum install -y kubelet-1.20.9 kubeadm-1.20.9 kubectl-1.20.9 --disableexcludes=kubernetes
sudo systemctl enable --now kubelet
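At this point it is normal for kubelet to restart in a loop, since the node has not yet been initialized or joined with kubeadm; its state can be watched with:
# kubelet will show "activating (auto-restart)" until kubeadm init/join runs
systemctl status kubelet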
III. Bootstrap the cluster with kubeadm
1. Pull the images each machine needs
sudo tee ./images.sh <<-'EOF'
#!/bin/bash
images=(
kube-apiserver:v1.20.9
kube-proxy:v1.20.9
kube-controller-manager:v1.20.9
kube-scheduler:v1.20.9
coredns:1.7.0
etcd:3.4.13-0
pause:3.2
)
for imageName in ${images[@]} ; do
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/$imageName
done
EOF
chmod +x ./images.sh && ./images.sh
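After the script finishes, the pulled images can be listed as an optional check:
# All seven control-plane images should be present locally
docker images | grep registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images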
2. Initialize the master node
# Add the master hostname mapping on every machine; change the IP below to your own
echo "172.31.0.3 cluster-endpoint" >> /etc/hosts
# Initialize the master node
kubeadm init \
  --apiserver-advertise-address=172.31.0.3 \
  --control-plane-endpoint=cluster-endpoint \
  --image-repository registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images \
  --kubernetes-version v1.20.9 \
  --service-cidr=10.96.0.0/16 \
  --pod-network-cidr=192.168.0.0/16
# The node, service and pod network ranges must not overlap with each other
3. Record the generated output
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join cluster-endpoint:6443 --token e77knp.kge8pdm0g3wh5693 \
--discovery-token-ca-cert-hash sha256:fd4613d9e1f254303cd42f33f6ac38744e30c3650602bd68e9f8b45ac423f860 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join cluster-endpoint:6443 --token e77knp.kge8pdm0g3wh5693 \
--discovery-token-ca-cert-hash sha256:fd4613d9e1f254303cd42f33f6ac38744e30c3650602bd68e9f8b45ac423f860
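The token in the join commands above is only valid for 24 hours by default. If it expires before a node joins, a fresh worker join command can be generated on the master with:
kubeadm token create --print-join-command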
4. Run the generated commands (on the master)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
5. Install the network add-on (Calico)
curl https://docs.projectcalico.org/manifests/calico.yaml -O
kubectl apply -f calico.yaml
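Once Calico has been applied, the cluster state can be checked; the nodes should turn Ready after the calico and coredns pods are Running (this may take a few minutes):
kubectl get pods -n kube-system
kubectl get nodes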
IV. Install NFS
1. Configure the NFS server (on the master)
yum install -y nfs-utils
# Create the exports file (vi /etc/exports) with the following content:
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
#/nfs/data 172.26.248.0/20(rw,no_root_squash)
# Start the NFS service with the commands below
# Create the shared directory
mkdir -p /nfs/data
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
exportfs -r
# Check that the export took effect
exportfs
# Expected output:
/nfs/data       <world>
2. Set up the NFS client
# Open TCP/UDP ports 111, 662, 875, 892 and 2049 in the server firewall, otherwise remote clients cannot connect.
# Install the client tools
yum install -y nfs-utils
# Check whether the NFS server exports a shared directory
# showmount -e <NFS server IP>
showmount -e 172.31.0.3
# Expected output:
Export list for 172.31.0.3
/nfs/data *
# Mount the shared directory on the NFS server to the local path /root/nfsmount
mkdir /root/nfsmount
# mount -t nfs <NFS server IP>:/nfs/data /root/nfsmount
# This mount can also serve as a high-availability backup of the share
mount -t nfs 172.31.0.3:/nfs/data /root/nfsmount
# Write a test file
echo "hello nfs server" > /root/nfsmount/test.txt
# On the NFS server, verify that the file was written successfully
cat /nfs/data/test.txt
3. Set up dynamic provisioning
# First create the RBAC authorization
# vi nfs-rbac.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["services", "endpoints"]
    verbs: ["get", "create", "list", "watch", "update"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["nfs-provisioner"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
# vi nfs-deployment.yaml — deploy the nfs-client provisioner
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-provisioner
      containers:
        - name: nfs-client-provisioner
          image: lizhenliang/nfs-client-provisioner
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME   # name of the provisioner
              value: storage.pri/nfs   # the name is arbitrary, but later references must match it
            - name: NFS_SERVER
              value: 172.31.0.3
            - name: NFS_PATH
              value: /nfs/data
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.31.0.3
            path: /nfs/data
## In this image the volume mountPath defaults to /persistentvolumes and must not be changed, otherwise the provisioner fails at runtime
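The RBAC and Deployment above do not yet define a StorageClass, so dynamic provisioning will not work until one referencing the provisioner name storage.pri/nfs is created. Below is a minimal sketch; the StorageClass name storage-nfs and the default-class annotation are assumptions (adjust as needed), and marking it as the default also matches the empty persistence.storageClass used in the KubeSphere configuration later:
# Apply the two files above first
kubectl apply -f nfs-rbac.yaml
kubectl apply -f nfs-deployment.yaml
# Create a StorageClass bound to the provisioner name storage.pri/nfs
cat <<EOF | kubectl apply -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: storage-nfs
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: storage.pri/nfs
reclaimPolicy: Delete
EOF
# Verify the StorageClass and the provisioner pod
kubectl get storageclass
kubectl get pods | grep nfs-client-provisioner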
V. Install metrics-server
# 1. Install metrics-server first (the YAML below already has the image and settings adjusted and can be applied directly), so that pod and node resource usage can be monitored. By default only CPU and memory metrics are collected; for more detailed metrics, integrate Prometheus later.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      hostNetwork: true
      serviceAccountName: metrics-server
      volumes:
        # mount in tmp so we can safely use from-scratch images and/or read-only containers
        - name: tmp-dir
          emptyDir: {}
      containers:
        - name: metrics-server
          image: mirrorgooglecontainers/metrics-server-amd64:v0.3.6
          imagePullPolicy: IfNotPresent
          args:
            - --cert-dir=/tmp
            - --secure-port=4443
            - --kubelet-insecure-tls
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
          ports:
            - name: main-port
              containerPort: 4443
              protocol: TCP
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - name: tmp-dir
              mountPath: /tmp
      nodeSelector:
        kubernetes.io/os: linux
        kubernetes.io/arch: "amd64"
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: metrics-server
  ports:
    - port: 443
      protocol: TCP
      targetPort: main-port
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
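The manifest above still has to be applied. Assuming it is saved as metrics-server.yaml (the filename is an assumption; any name works), a short apply-and-verify sketch:
kubectl apply -f metrics-server.yaml
kubectl get pods -n kube-system | grep metrics-server
# Once the pod is Running, node and pod resource usage should be available
kubectl top nodes
kubectl top pods -A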
VI. Install KubeSphere
1. Download and apply the official manifests
https://kubesphere.io/zh/docs/v3.3/quick-start/minimal-kubesphere-on-k8s/
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/kubesphere-installer.yaml
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.3.1/cluster-configuration.yaml
2. Edit the cluster configuration file (cluster-configuration.yaml)
vi cluster-configuration.yaml
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.0.0
spec:
  persistence:
    storageClass: ""        # If there is not a default StorageClass in your cluster, you need to specify an existing StorageClass here.
  authentication:
    jwtSecret: ""           # Keep the jwtSecret consistent with the host cluster. Retrieve the jwtSecret by executing "kubectl -n kubesphere-system get cm kubesphere-config -o yaml | grep -v "apiVersion" | grep jwtSecret" on the host cluster.
  etcd:
    monitoring: true        # Whether to enable the etcd monitoring dashboard. You have to create a secret for etcd before you enable it.
    endpointIps: 172.31.0.3 # etcd cluster endpoint IPs; it can be a list of IPs here.
    port: 2379              # etcd port.
    tlsEnable: true
  common:
    mysqlVolumeSize: 20Gi   # MySQL PVC size.
    minioVolumeSize: 20Gi   # Minio PVC size.
    etcdVolumeSize: 20Gi    # etcd PVC size.
    openldapVolumeSize: 2Gi # openldap PVC size.
    redisVolumSize: 2Gi     # Redis PVC size.
    es:                     # Storage backend for logging, events and auditing.
      # elasticsearchMasterReplicas: 1   # The total number of master nodes. An even number is not allowed.
      # elasticsearchDataReplicas: 1     # The total number of data nodes.
      elasticsearchMasterVolumeSize: 4Gi # Volume size of Elasticsearch master nodes.
      elasticsearchDataVolumeSize: 20Gi  # Volume size of Elasticsearch data nodes.
      logMaxAge: 7                       # Log retention time in the built-in Elasticsearch; 7 days by default.
      elkPrefix: logstash                # The string making up index names. The index name will be formatted as ks-<elk_prefix>-log.
  console:
    enableMultiLogin: true  # Enable/disable multiple sign-on; it allows one account to be used by different users at the same time.
    port: 30880
  alerting:                 # (CPU: 0.3 Core, Memory: 300 MiB) Whether to install the KubeSphere alerting system. It lets users customize alerting policies to send messages to receivers in time, with different time intervals and alerting levels to choose from.
    enabled: true
  auditing:                 # Whether to install the KubeSphere audit log system. It provides a security-relevant chronological set of records, documenting the sequence of activities on the platform, initiated by different tenants.
    enabled: true
  devops:                   # (CPU: 0.47 Core, Memory: 8.6 G) Whether to install the KubeSphere DevOps system. It provides an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
    enabled: true
    jenkinsMemoryLim: 2Gi     # Jenkins memory limit.
    jenkinsMemoryReq: 1500Mi  # Jenkins memory request.
    jenkinsVolumeSize: 8Gi    # Jenkins volume size.
    jenkinsJavaOpts_Xms: 512m # The following three fields are JVM parameters.
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:                   # Whether to install the KubeSphere events system. It provides a graphical web console for Kubernetes Events exporting, filtering and alerting in multi-tenant Kubernetes clusters.
    enabled: true
    ruler:
      enabled: true
      replicas: 2
  logging:                  # (CPU: 57 m, Memory: 2.76 G) Whether to install the KubeSphere logging system. Flexible logging functions are provided for log query, collection and management in a unified console. Additional log collectors can be added, such as Elasticsearch, Kafka and Fluentd.
    enabled: true
    logsidecarReplicas: 2
  metrics_server:           # (CPU: 56 m, Memory: 44.35 MiB) Whether to install metrics-server. It enables HPA (Horizontal Pod Autoscaler).
    enabled: false
  monitoring:
    # prometheusReplicas: 1          # Prometheus replicas are responsible for monitoring different segments of the data source and provide high availability.
    prometheusMemoryRequest: 400Mi   # Prometheus memory request.
    prometheusVolumeSize: 20Gi       # Prometheus PVC size.
    # alertmanagerReplicas: 1        # AlertManager replicas.
  multicluster:
    clusterRole: none       # host | member | none # You can install a solo cluster, or specify it as the role of a host or member cluster.
  networkpolicy:            # Network policies allow network isolation within the same cluster, which means firewalls can be set up between certain instances (Pods).
    # Make sure that the CNI network plugin used by the cluster supports NetworkPolicy. There are a number of CNI network plugins that support NetworkPolicy, including Calico, Cilium, Kube-router, Romana and Weave Net.
    enabled: true
  notification:             # Email notification support for the legacy alerting system; it should be enabled/disabled together with the alerting option above.
    enabled: true
  openpitrix:               # (2 Core, 3.6 G) Whether to install the KubeSphere Application Store. It provides an application store for Helm-based applications and offers application lifecycle management.
    enabled: true
  servicemesh:              # (0.3 Core, 300 MiB) Whether to install KubeSphere Service Mesh (Istio-based). It provides fine-grained traffic management, observability and tracing, and offers visualization of the traffic topology.
    enabled: true
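After editing, the modified configuration has to be applied again so that ks-installer picks it up (assuming the file edited above is the local cluster-configuration.yaml):
kubectl apply -f cluster-configuration.yaml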
3. Check installation progress
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f
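Once the installer log reports success, the web console is reachable through the ks-console NodePort (30880, as configured above); the documented default account is admin / P@88w0rd and should be changed on first login:
kubectl get svc ks-console -n kubesphere-system
# Console: http://<any node IP>:30880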
# If the installation gets stuck, the ks-installer pod can be force-deleted to restart it (use your own pod name):
kubectl delete pod ks-installer-895b8994d-b8lnw --force --grace-period=0 -n kubesphere-system
Appendix: docker-compose file for the ts-* microservices
version: "3"
services:
# rabbitmq:
# image: rabbitmq:management
# ports:
# - 5672:5672
# - 15672:15672
# networks:
# - my-network
# kafka:
# container_name: kafka
# image: spotify/kafka:latest
# ports:
# - 2181:2181
# - 9092:9092
# networks:
# - my-network
# zipkin:
# image: openzipkin/zipkin
# ports:
# - 9411:9411
# networks:
# - my-network
redis:
image: redis
ports:
- 6379:6379
networks:
- my-network
ts-ui-dashboard:
build: ts-ui-dashboard
image: ${IMG_REPO}/ts-ui-dashboard:${IMG_TAG}
restart: always
ports:
- 8080:8080
networks:
- my-network
ts-auth-service:
build: ts-auth-service
image: ${IMG_REPO}/ts-auth-service:${IMG_TAG}
restart: always
ports:
- 12340:12340
networks:
- my-network
ts-auth-mongo:
image: mongo
networks:
- my-network
ts-user-service:
build: ts-user-service
image: ${IMG_REPO}/ts-user-service:${IMG_TAG}
restart: always
ports:
- 12342:12342
networks:
- my-network
ts-user-mongo:
image: mongo
networks:
- my-network
ts-verification-code-service:
build: ts-verification-code-service
image: ${IMG_REPO}/ts-verification-code-service:${IMG_TAG}
restart: always
ports:
- 15678:15678
networks:
- my-network
ts-account-mongo:
image: mongo
networks:
- my-network
ts-route-service:
build: ts-route-service
image: ${IMG_REPO}/ts-route-service:${IMG_TAG}
restart: always
ports:
- 11178:11178
networks:
- my-network
ts-route-mongo:
image: mongo
networks:
- my-network
ts-contacts-service:
build: ts-contacts-service
image: ${IMG_REPO}/ts-contacts-service:${IMG_TAG}
restart: always
ports:
- 12347:12347
networks:
- my-network
ts-contacts-mongo:
image: mongo
networks:
- my-network
ts-order-service:
build: ts-order-service
image: ${IMG_REPO}/ts-order-service:${IMG_TAG}
restart: always
ports:
- 12031:12031
networks:
- my-network
ts-order-mongo:
image: mongo
networks:
- my-network
ts-order-other-service:
build: ts-order-other-service
image: ${IMG_REPO}/ts-order-other-service:${IMG_TAG}
restart: always
ports:
- 12032:12032
networks:
- my-network
ts-order-other-mongo:
image: mongo
networks:
- my-network
ts-config-service:
build: ts-config-service
image: ${IMG_REPO}/ts-config-service:${IMG_TAG}
restart: always
ports:
- 15679:15679
networks:
- my-network
ts-config-mongo:
image: mongo
networks:
- my-network
ts-station-service:
build: ts-station-service
image: ${IMG_REPO}/ts-station-service:${IMG_TAG}
restart: always
ports:
- 12345:12345
networks:
- my-network
ts-station-mongo:
image: mongo
networks:
- my-network
ts-train-service:
build: ts-train-service
image: ${IMG_REPO}/ts-train-service:${IMG_TAG}
restart: always
ports:
- 14567:14567
networks:
- my-network
ts-train-mongo:
image: mongo
networks:
- my-network
ts-travel-service:
build: ts-travel-service
image: ${IMG_REPO}/ts-travel-service:${IMG_TAG}
restart: always
ports:
- 12346:12346
networks:
- my-network
ts-travel-mongo:
image: mongo
networks:
- my-network
ts-travel2-service:
build: ts-travel2-service
image: ${IMG_REPO}/ts-travel2-service:${IMG_TAG}
restart: always
ports:
- 16346:16346
networks:
- my-network
ts-travel2-mongo:
image: mongo
networks:
- my-network
ts-preserve-service:
build: ts-preserve-service
image: ${IMG_REPO}/ts-preserve-service:${IMG_TAG}
restart: always
ports:
- 14568:14568
networks:
- my-network
ts-preserve-other-service:
build: ts-preserve-other-service
image: ${IMG_REPO}/ts-preserve-other-service:${IMG_TAG}
restart: always
ports:
- 14569:14569
networks:
- my-network
ts-basic-service:
build: ts-basic-service
image: ${IMG_REPO}/ts-basic-service:${IMG_TAG}
restart: always
ports:
- 15680:15680
networks:
- my-network
ts-ticketinfo-service:
build: ts-ticketinfo-service
image: ${IMG_REPO}/ts-ticketinfo-service:${IMG_TAG}
restart: always
ports:
- 15681:15681
networks:
- my-network
ts-price-service:
build: ts-price-service
image: ${IMG_REPO}/ts-price-service:${IMG_TAG}
restart: always
ports:
- 16579:16579
networks:
- my-network
ts-price-mongo:
image: mongo
networks:
- my-network
ts-notification-service:
build: ts-notification-service
image: ${IMG_REPO}/ts-notification-service:${IMG_TAG}
restart: always
ports:
- 17853:17853
networks:
- my-network
ts-security-service:
build: ts-security-service
image: ${IMG_REPO}/ts-security-service:${IMG_TAG}
restart: always
ports:
- 11188:11188
networks:
- my-network
ts-security-mongo:
image: mongo
networks:
- my-network
ts-inside-payment-service:
build: ts-inside-payment-service
image: ${IMG_REPO}/ts-inside-payment-service:${IMG_TAG}
restart: always
ports:
- 18673:18673
networks:
- my-network
ts-inside-payment-mongo:
image: mongo
networks:
- my-network
ts-execute-service:
build: ts-execute-service
image: ${IMG_REPO}/ts-execute-service:${IMG_TAG}
restart: always
ports:
- 12386:12386
networks:
- my-network
ts-payment-service:
build: ts-payment-service
image: ${IMG_REPO}/ts-payment-service:${IMG_TAG}
restart: always
ports:
- 19001:19001
networks:
- my-network
ts-payment-mongo:
image: mongo
networks:
- my-network
ts-rebook-service:
build: ts-rebook-service
image: ${IMG_REPO}/ts-rebook-service:${IMG_TAG}
restart: always
ports:
- 18886:18886
networks:
- my-network
ts-rebook-mongo:
image: mongo
networks:
- my-network
ts-cancel-service:
build: ts-cancel-service
image: ${IMG_REPO}/ts-cancel-service:${IMG_TAG}
restart: always
ports:
- 18885:18885
networks:
- my-network
ts-assurance-service:
build: ts-assurance-service
image: ${IMG_REPO}/ts-assurance-service:${IMG_TAG}
restart: always
ports:
- 18888:18888
networks:
- my-network
ts-assurance-mongo:
image: mongo
networks:
- my-network
ts-seat-service:
build: ts-seat-service
image: ${IMG_REPO}/ts-seat-service:${IMG_TAG}
restart: always
ports:
- 18898:18898
networks:
- my-network
ts-travel-plan-service:
build: ts-travel-plan-service
image: ${IMG_REPO}/ts-travel-plan-service:${IMG_TAG}
restart: always
ports:
- 14322:14322
networks:
- my-network
ts-ticket-office-service:
build: ts-ticket-office-service
image: ${IMG_REPO}/ts-ticket-office-service:${IMG_TAG}
restart: always
ports:
- 16108:16108
networks:
- my-network
ts-ticket-office-mongo:
image: mongo
networks:
- my-network
ts-news-service:
build: ts-news-service
image: ${IMG_REPO}/ts-news-service:${IMG_TAG}
restart: always
ports:
- 12862:12862
networks:
- my-network
ts-news-mongo:
image: mongo
networks:
- my-network
ts-voucher-mysql:
image: mysql
expose:
- "3306"
environment:
MYSQL_ROOT_PASSWORD: root
networks:
- my-network
ts-voucher-service:
build: ts-voucher-service
image: ${IMG_REPO}/ts-voucher-service:${IMG_TAG}
restart: always
ports:
- 16101:16101
depends_on:
- ts-voucher-mysql
volumes:
- /var/lib/mysql
networks:
- my-network
ts-food-map-service:
build: ts-food-map-service
image: ${IMG_REPO}/ts-food-map-service:${IMG_TAG}
restart: always
ports:
- 18855:18855
networks:
- my-network
ts-food-map-mongo:
image: mongo
networks:
- my-network
ts-route-plan-service:
build: ts-route-plan-service
image: ${IMG_REPO}/ts-route-plan-service:${IMG_TAG}
restart: always
ports:
- 14578:14578
networks:
- my-network
ts-food-service:
build: ts-food-service
image: ${IMG_REPO}/ts-food-service:${IMG_TAG}
restart: always
ports:
- 18856:18856
networks:
- my-network
ts-consign-service:
build: ts-consign-service
image: ${IMG_REPO}/ts-consign-service:${IMG_TAG}
restart: always
ports:
- 16111:16111
networks:
- my-network
ts-consign-mongo:
image: mongo
networks:
- my-network
ts-consign-price-service:
build: ts-consign-price-service
image: ${IMG_REPO}/ts-consign-price-service:${IMG_TAG}
restart: always
ports:
- 16110:16110
networks:
- my-network
ts-consign-price-mongo:
image: mongo
networks:
- my-network
ts-food-mongo:
image: mongo
networks:
- my-network
ts-admin-basic-info-service:
build: ts-admin-basic-info-service
image: ${IMG_REPO}/ts-admin-basic-info-service:${IMG_TAG}
restart: always
ports:
- 18767:18767
networks:
- my-network
ts-admin-order-service:
build: ts-admin-order-service
image: ${IMG_REPO}/ts-admin-order-service:${IMG_TAG}
restart: always
ports:
- 16112:16112
networks:
- my-network
ts-admin-route-service:
build: ts-admin-route-service
image: ${IMG_REPO}/ts-admin-route-service:${IMG_TAG}
restart: always
ports:
- 16113:16113
deploy:
replicas: 1
restart_policy:
condition: on-failure
networks:
- my-network
ts-admin-travel-service:
build: ts-admin-travel-service
image: ${IMG_REPO}/ts-admin-travel-service:${IMG_TAG}
restart: always
ports:
- 16114:16114
deploy:
replicas: 1
restart_policy:
condition: on-failure
networks:
- my-network
ts-admin-user-service:
build: ts-admin-user-service
image: ${IMG_REPO}/ts-admin-user-service:${IMG_TAG}
restart: always
ports:
- 16115:16115
deploy:
replicas: 1
restart_policy:
condition: on-failure
networks:
- my-network
ts-avatar-service:
image: ${NAMESPACE}/ts-avatar-service:${TAG}
restart: always
ports:
- 17001:17001
networks:
- my-network
networks:
my-network:
# driver: overlay
driver: bridge