k8s one-click release
Environment preparation
1. k8s (ingress controller, CoreDNS, automatic PV provisioning) — omitted here, covered in earlier posts
2. Helm v3 — unpack the release tarball and drop the binary into /usr/bin/; see the Helm installation notes for details (a sketch follows the notes below)
3. gitlab
4. Harbor, with the chart repository (ChartMuseum) feature enabled
5. mysql (database for the microservices)
6. Eureka (service registry)
Notes:
10.1.234.11  gitlab + nfs + harbor + mysql, 4 cores / 8 GB — CPU usage is low, it is mostly memory-bound
10.1.234.103 master1 — compilation and related operations run here
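A minimal sketch for step 2 (installing Helm v3), assuming the v3.2.4 linux-amd64 tarball — swap in whatever version you actually downloaded:
wget https://get.helm.sh/helm-v3.2.4-linux-amd64.tar.gz
tar zxvf helm-v3.2.4-linux-amd64.tar.gz
mv linux-amd64/helm /usr/bin/helm
helm version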
Harbor: install with the chart repository (ChartMuseum) enabled
vim harbor.yml
hostname: 10.1.234.11
./prepare
./install.sh --with-chartmuseum
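To confirm that ChartMuseum is up, you can hit the chart repo of Harbor's default library project (a sketch — adjust the project name and credentials to your setup):
curl -u admin:Harbor12345 http://10.1.234.11/chartrepo/library/index.yaml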
Add Harbor to Docker's trusted (insecure) registries
[root@ansible-11 harbor]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://a960lhr2.mirror.aliyuncs.com"],
"insecure-registries":["10.1.234.11"]
}
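Reload and restart Docker so the insecure-registry setting takes effect:
systemctl daemon-reload
systemctl restart docker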
#################################################################################
jenkins
[root@k8s-master1 jenkins]# ls
deployment.yml ingress.yml rbac.yml service-account.yml service.yml
[root@k8s-master1 jenkins]# kubectl apply -f .
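A quick check that Jenkins came up (a sketch; the exact resource names depend on the manifests above):
kubectl get pods,svc,ingress | grep jenkins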
jenkins-slave
[root@k8s-master1 jenkins-slave]# ls
Dockerfile helm jenkins-slave kubectl settings.xml slave.jar
docker build -t 10.1.234.11/library/jenkins-slave-jdk:1.8 .
[root@k8s-master1 jenkins-slave]# docker push 10.1.234.11/library/jenkins-slave-jdk:1.8
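For reference, a rough sketch of how the files in this directory typically fit together in the Dockerfile — an assumed layout only; the Dockerfile shipped with the lab may differ in base image, package names, and paths:
FROM centos:7
# build tools used by the pipeline: JDK 8, Maven, git (package availability depends on your configured repos)
RUN yum install -y java-1.8.0-openjdk maven git && yum clean all
# Jenkins JNLP agent plus the CLI tools the pipeline shells out to
COPY slave.jar /usr/share/jenkins/slave.jar
COPY jenkins-slave /usr/bin/jenkins-slave
COPY settings.xml /etc/maven/settings.xml
COPY helm kubectl /usr/bin/
RUN chmod +x /usr/bin/jenkins-slave /usr/bin/helm /usr/bin/kubectl
ENTRYPOINT ["jenkins-slave"]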
Configure the Jenkins update-center mirror for faster plugin downloads — do this on the NFS host:
cd /ifs/kubernetes/default-jenkins-pvc-854a1500-d073-413d-8e88-1c139261e88d/updates/
sed -i 's/http:\/\/updates.jenkins-ci.org\/download/https:\/\/mirrors.tuna.tsinghua.edu.cn\/jenkins/g' default.json && \
sed -i 's/http:\/\/www.google.com/https:\/\/www.baidu.com/g' default.json
Kubernetes has no built-in "restart pod" command, so delete the pod instead; since the Jenkins data is persisted on NFS, nothing is lost.
[root@k8s-master1 ~]# kubectl delete pod jenkins-fcc9d45fc-pbgbh
pod "jenkins-fcc9d45fc-pbgbh" deleted
Install the plugins — before switching to the mirror, downloads crawled along at ant speed; after the switch they are rocket fast.
Required plugins: Git Parameter / Git / Pipeline / Config File Provider / Kubernetes / Extended Choice Parameter / Publish over SSH
#########################################################################
Create a kubeconfig file — a binary-package installation does not generate one, whereas kubeadm creates it by default.
1. Generate it on the ansible machine
[root@ansible-11 ~]# cd ansible-install-k8s/ssl/k8s/
[root@ansible-11 k8s]# ls admin*.pem
admin-key.pem admin.pem
[root@ansible-11 k8s]# cat config.sh
#!/bin/bash
# set cluster parameters
kubectl config set-cluster kubernetes \
  --server=https://10.1.234.100:6443 \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --kubeconfig=config
# set context parameters
kubectl config set-context default \
  --cluster=kubernetes \
  --user=cluster-admin \
  --kubeconfig=config
# set the default context
kubectl config use-context default --kubeconfig=config
# set client credentials
kubectl config set-credentials cluster-admin \
  --certificate-authority=ca.pem \
  --embed-certs=true \
  --client-key=admin-key.pem \
  --client-certificate=admin.pem \
  --kubeconfig=config
Copy over the kubectl binary, then run the script:
[root@ansible-11 k8s]# scp 10.1.234.103:/usr/bin/kubectl /usr/bin/kubectl
root@10.1.234.103's password:
kubectl 100% 45MB 76.7MB/s 00:00
[root@ansible-11 k8s]# sh config.sh
Cluster "kubernetes" set.
Context "default" created.
Switched to context "default".
User "cluster-admin" set.
Inspect the generated config
[root@ansible-11 k8s]# cat config
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR2akNDQXFhZ0F3SUJBZ0lVZFhUcDhnOVloNjhsM29xb25SalVFbUN3em1Zd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEl3TURVeU56QTJOVGd3TUZvWERUSTFNRFV5TmpBMk5UZ3dNRm93WlRFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbGFXcHBibWN4RERBSwpCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByZFdKbGNtNWxkR1Z6Ck1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBc3dheUZuUUU4bXBjYXhaUnpkUHoKN0pvSXdkbE81NkZCYUxGRzVtZ3FNMFdVb2h3dTBkbC9VeElMVFNORWs1TDhmWm93U1RiY2VPVmpuSVdUcFhUSgp2ZURqVzZQb2l6ZzZDdWRoTHpxcnFDN2FETnJ3VGxpaEszUmwxUWo3cUFxNVlQRDVCODk2bHFBWWpybjNjemV5CkRtb2JwZ3NOZm42YzMvTnhUWnhNY2U3YTlVbzYzNWt0Rm1Gb1hSdk5BTCtXd0c4MlNIcnhwTE1TcFAzUTlzcW8KZm81SXlWRm5BZVlSYUFQNUVLa0k4QXZwcFNYam1PckVITDZsOFhNc0UrQ0V2YndIc2g1ZEl2eGlmRC92R1UxSAplbE9UcUhCZ3BwNXUxTmpaek5Hci8zNWpPMk05Q3V2K3F2aUFTNzlPRDFSZUh1aFpFQ3d6ZVgrQzBPUnd4NGZ0Cnl3SURBUUFCbzJZd1pEQU9CZ05WSFE4QkFmOEVCQU1DQVFZd0VnWURWUjBUQVFIL0JBZ3dCZ0VCL3dJQkFqQWQKQmdOVkhRNEVGZ1FVTDV4UVJiRDlXbGoyb09iUXlPS2RESjRUNXl3d0h3WURWUjBqQkJnd0ZvQVVMNXhRUmJEOQpXbGoyb09iUXlPS2RESjRUNXl3d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDY2FOd3dwZTdIa0RLUTAyWHhVClFtMGVCRzNFSmlVa3VJL2l3UFJ6QVBPa2hwWGJYbjVlUjZvVGhtYmMxWU5QaFp6Q0gxcXhCRjBURkRoM2JwaG0KNi9sSWFYakZ3QjZVUVpjNUFreTV6NnRQVmtZNkRrVGFuam0rcGVlWEtLMS9xVWw1REtvalFYeE9WczJ0VzN5eQp1ZlQrZE43YWxJMDgzdVptUTJXcEFaQmhncGYvQ1FYMTFZZ1Fnd2VrYllqYmRjaEVTMWhGMlZzTDFidkhwSE9yClU3dUxKaENqdGYxRXdxSy9vckRMQnQvSDg0V2tyb1NtWlYrdzVkSE0rUFlnd0xDYzBrWWFhK1hyM2tUcjNEeU4KSDNKUENDY2d6dkNxd3p2TEhudmRNSWFjbXVMd1Y5a3lRcXo4dzhZelhMd00xMGJKbVlyUmh4YjJFc3NKam1iKwplNTA9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://10.1.234.100:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: cluster-admin
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: cluster-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQzVENDQXNXZ0F3SUJBZ0lVRzlmMFVnaURlTEwwQ3NjZWZIZXdDK054bGVRd0RRWUpLb1pJaHZjTkFRRUwKQlFBd1pURUxNQWtHQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFXcHBibWN4RURBT0JnTlZCQWNUQjBKbAphV3BwYm1jeEREQUtCZ05WQkFvVEEyczRjekVQTUEwR0ExVUVDeE1HVTNsemRHVnRNUk13RVFZRFZRUURFd3ByCmRXSmxjbTVsZEdWek1CNFhEVEl3TURVeU56QTJOVGd3TUZvWERUTXdNRFV5TlRBMk5UZ3dNRm93YXpFTE1Ba0cKQTFVRUJoTUNRMDR4RURBT0JnTlZCQWdUQjBKbGFVcHBibWN4RURBT0JnTlZCQWNUQjBKbGFVcHBibWN4RnpBVgpCZ05WQkFvVERuTjVjM1JsYlRwdFlYTjBaWEp6TVE4d0RRWURWUVFMRXdaVGVYTjBaVzB4RGpBTUJnTlZCQU1UCkJXRmtiV2x1TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF6NDJnTGR4dDVUZGwKV3V5N2IybFQzYWpiMDk2cXZQSVZrRFMwaXZQU0V3N3lWeHUyY3VsZ1FpenhTWk92N21kbEV1cDM3aUo5QUt1NApTeDU0eWZtU2hTZHhyWXZ2bmUxU3hUWlNlM1R5Y3FLOGpEWlNpdVFQeU1YYVBQdldGcGRXTDlkdi85WTNjYzNvCmFvanR1ZytCS09kY3ZBV1M3Z3RaU0tUUFp3LzJ0bFFpTkh0djF5QUxFTGNHWVF2M1F4MVJ1SWNzam45MTd2akYKS1JiRitlZXRQVE1LRU5oOGhMdE5QS2VSOFN2WnFpUytlVDNiY0VhZGZGdGUyNXROTkt6KzI2c0hHOEtMSTdTWQpKSm9JamwyeGc1REpjL0FWZWVlaUQvcTJmcmtjd21acldZRUNwdUI3T0N3M2pLZk1XMjc3K1plRWM0czBXVDBBCnc3QXF2WTdIOHdJREFRQUJvMzh3ZlRBT0JnTlZIUThCQWY4RUJBTUNCYUF3SFFZRFZSMGxCQll3RkFZSUt3WUIKQlFVSEF3RUdDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBd0hRWURWUjBPQkJZRUZBOG0ya3dkREs1MAp1NzVDMTFOalBQUmh4bzlQTUI4R0ExVWRJd1FZTUJhQUZDK2NVRVd3L1ZwWTlxRG0wTWppblF5ZUUrY3NNQTBHCkNTcUdTSWIzRFFFQkN3VUFBNElCQVFCZElwdWE1UDg4eXZkanJhRUtWNURlZjRXZzBkZzVPZkMzVU5SZjErUmsKbzdyaldFOWs2SEkyQTI1VWVUbmdMcHExUUpFR1M4N1dWM2VFRjBrYkxPOEhjcHdsWnNmZlp4TUttSTZJcnhTMgprc0I4M2JkUXorUmtJNVlhYnVTbUIvMTRlbEUyUHBCcWIxWTFURDFLZ0dTUEN2Ri83NlROdElPQVFKV3E2T1BxCkUwVEN6b3hyc1dEZXFGK28vMnhiS1R4b1d3anA4RjlZdVI5SGtSR0tmU05PMC9GaUZXdmlDY1RZMWdmTHR1N24KU1Jha01rK09nd0o1N0JPRmNXcVd0RW5FY2V3c3lRck5YSDROdXVrVGViYVlCUEJ6Y3F0b21PVzk0emlXdVN3MAp0am01aWErSERyUk5oQ1pSWmsvak9GRlk4TkJ0UklSbmFBdlJRZzV1YWcyNAotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBejQyZ0xkeHQ1VGRsV3V5N2IybFQzYWpiMDk2cXZQSVZrRFMwaXZQU0V3N3lWeHUyCmN1bGdRaXp4U1pPdjdtZGxFdXAzN2lKOUFLdTRTeDU0eWZtU2hTZHhyWXZ2bmUxU3hUWlNlM1R5Y3FLOGpEWlMKaXVRUHlNWGFQUHZXRnBkV0w5ZHYvOVkzY2Mzb2FvanR1ZytCS09kY3ZBV1M3Z3RaU0tUUFp3LzJ0bFFpTkh0dgoxeUFMRUxjR1lRdjNReDFSdUljc2puOTE3dmpGS1JiRitlZXRQVE1LRU5oOGhMdE5QS2VSOFN2WnFpUytlVDNiCmNFYWRmRnRlMjV0Tk5LeisyNnNIRzhLTEk3U1lKSm9JamwyeGc1REpjL0FWZWVlaUQvcTJmcmtjd21acldZRUMKcHVCN09DdzNqS2ZNVzI3NytaZUVjNHMwV1QwQXc3QXF2WTdIOHdJREFRQUJBb0lCQUZrMXpaaUl1MitmYk9BMQpDcWRyaFZHc0I3aThaM1RKTlAyRHVlcFR2SUxXbE5GZUp3Q2pVTzJBdGNGalNtWlZhTHRGcm9EYnNCRCt2anNECmdQV0hZVFA4YlQ4ais3MU15WnZjcW4xSUdnR3Q4dzd0Tm5OZWJXNWxwRlU4Qlp3Mk5pUmdIRkxCdDVraDRQa1IKbStTeVQxak9nU3lMMU1pZWRpVGtPZ1ZaZGppTmtPeGxFZDBobENSWExOd05veEJWRlQ0eWNSbXBQRGRaY3JsSAprdGtvMmZhY21oMGdvWFhBVU1iSVlFYmNKRFhXMVRQSkFZWjloa0VUb2JzS09iR2dmVCtOVlBxV1dCU1FUcTRuCjJhVnJUY2hNVmszYWlRYzVrb2Fpd2UzSktMY000VW1TOU5QbmhBUktnbytZaFp4aVhxYTBaRWtoVUZQRTZ6TEEKYmltZGQ2a0NnWUVBNmhKN1drYmRodVZ4bzFYVjZXd3laMTA5UHl1VnlOanUrbmlNTllOaks1YjJHN1BxejJXKwozclE4NUtQb2Q5aUZuZ3hGRmx0ZTdDakFiME5FV3VZL2RuRkQ1aXN1RTNOUWJ6Y1l2UVNUeEZ2enp6ei9OZDlvCmNmRURrOWNsMXVEYkJUNDY5dmw2Zm5oK2xTNFVCa2p5dnRIa3lBSDFSWWVocmJOSTc0SnpqMDBDZ1lFQTR2OHEKZzRiTGtldC9aTkJhUzhQMWJ5T09EcThUaVYxcklma3BLc0NmdVZGNlo2VVc2WXN3bkR4Tm8rY1ZQZDNxMGYyVwpXQXhoU3VHUjZjekhIMFp3Rm5YMUg3a0RVU040R01QYjUzNjZxQjNsbzcyU2N6MkZaYmhab3Bjd1ZoNlFxR0dZCktVS1RLaHZWdmEvZ3RmeS9PZXRIM3M5T0NKVWZzMTBTc2c1ZWxEOENnWUJCWnppZ1dQeTlISTlNSEVmUkdUSmMKUDJsRnBXQU01dENmbTk5UmRJelpPUzA3UUdKejRQSWNPREJya29ENk83M2FFNE9hWTUxNlQwaTF6ZzNqZUVKegpUZUt6QlkyeVVaNC85UjBzYzRMMmN0c0ZKUDRLaDRvZmpFQjdwcm9qK0dBNmJ0RDdBV3FBdkJwaWhLV0R2QTVYCjB6NGh4S3BMNjBINXp3YXFKY0UwWVFLQmdCN3ltUEk3bzI5dWd2elVweXJqT1lnQWVmS29qZXBSY1RTOWlvNUkKbXI0aVRLVHF6aGZqSVNwNzBqNEJXRlVKOTJUUHhQcVdIWUdmQlV5eDNZZjNDM3NVYytueVNIQjhmSGdyNDhSSQo4OXZOejZ3bVFWd3l6QWdWUFNzOG41dHVLVnk3aVlvK2lGL2FVYWE5M3M0USttVkVIWlZVQUJpTmREZjQvWDZICkZQL3pBb0dBZSs1Y2kxZ056aU41YWlnMFBXNjlWOEV2ZlM0UjN0YmliNHNNTEN0am9rMlNpRlFOTlFMc0YrUzEKcXRENk0zNWdhT1RtS2pXU2dhZVdCN3c5UjJXTUFIaG52aHc0UWlIR1NBcEZWTW1rZ0JaN1FVZFQvUGlFV0lxTAp4ZDcyczl6czRxdHFmb3lzTWRBMnVWVEFOVDhWWjlQc09teHJram9tZlg3Z2dob1pNL0U9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
Verification — calling the API server directly (without the config) does not work:
[root@ansible-11 k8s]# kubectl get pods
The connection to the server localhost:8080 was refused - did you specify the right host or port?
You have to pass the kubeconfig file explicitly:
[root@ansible-11 k8s]# kubectl --kubeconfig=config get pods
NAME READY STATUS RESTARTS AGE
jenkins-fcc9d45fc-z2h7r 1/1 Running 0 47h
nfs-client-provisioner-5f5d87bf6c-vjzks 1/1 Running 0 3d
###########################################################################
Add credentials in Jenkins: a username/password credential for GitLab (referenced as git_auth in the pipeline), one for the Harbor registry (harbor_registry_auth), and the kubeconfig generated above as a managed file via the Config File Provider plugin (k8s_auth).
#########################################################################
Add the Helm chart repository (using the Harbor credentials)
[root@k8s-master1 pkg]# helm repo add --username admin --password Harbor12345 myrepo http://10.1.234.11/chartrepo/microservice
"myrepo" has been added to your repositories
Push the chart package to the repository
helm push ms-0.1.0.tgz --username=root --password=Harbor12345 http://10.1.234.11/chartrepo/microservice
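Note that helm push is not built into Helm v3 — it comes from the ChartMuseum helm-push plugin — and the .tgz is produced by helm package first. A sketch, assuming the chart source directory is named ms and the myrepo alias added above; the username should be an account with push rights on the project (admin/Harbor12345 elsewhere in these notes):
helm plugin install https://github.com/chartmuseum/helm-push
helm package ms/
helm push ms-0.1.0.tgz myrepo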
######################################################
Create the Jenkins pipeline project (Jenkinsfile below)
#!/usr/bin/env groovy
// Required plugins: Git Parameter / Git / Pipeline / Config File Provider / Kubernetes / Extended Choice Parameter
// Common
def registry = "10.1.234.11"
// Project
def project = "microservice"
def git_url = "http://10.1.234.11:9999/ms/simple-microservice.git"
def gateway_domain_name = "gateway.ctnrs.com"
def portal_domain_name = "portal.ctnrs.com"
// Credentials
def image_pull_secret = "registry-pull-secret"
def harbor_registry_auth = "3bd5bb4f-0b0a-4de0-8146-c1e1e9e24a98"
def git_auth = "05854839-c86d-40b4-b8f4-0075ccd0e0ea"
// ConfigFileProvider ID
def k8s_auth = "bfcd73c6-4a89-4e9d-8c28-7016bc1c087a"
pipeline {
  agent {
    kubernetes {
      label "jenkins-slave"
      yaml """
kind: Pod
metadata:
  name: jenkins-slave
spec:
  containers:
  - name: jnlp
    image: "${registry}/library/jenkins-slave-jdk:1.8"
    imagePullPolicy: Always
    volumeMounts:
    - name: docker-cmd
      mountPath: /usr/bin/docker
    - name: docker-sock
      mountPath: /var/run/docker.sock
    - name: maven-cache
      mountPath: /root/.m2
  volumes:
  - name: docker-cmd
    hostPath:
      path: /usr/bin/docker
  - name: docker-sock
    hostPath:
      path: /var/run/docker.sock
  - name: maven-cache
    hostPath:
      path: /tmp/m2
"""
    }
  }
  parameters {
    gitParameter branch: '', branchFilter: '.*', defaultValue: 'master', description: 'Branch to release', name: 'Branch', quickFilterEnabled: false, selectedValue: 'NONE', sortMode: 'NONE', tagFilter: '*', type: 'PT_BRANCH'
    extendedChoice defaultValue: 'none', description: 'Microservices to release', \
      multiSelectDelimiter: ',', name: 'Service', type: 'PT_CHECKBOX', \
      value: 'gateway-service:9999,portal-service:8080,product-service:8010,order-service:8020,stock-service:8030'
    choice (choices: ['ms', 'demo'], description: 'Deployment chart template', name: 'Template')
    choice (choices: ['1', '3', '5', '7'], description: 'Replica count', name: 'ReplicaCount')
    choice (choices: ['ms'], description: 'Namespace', name: 'Namespace')
  }
  stages {
    stage('Pull code') {
      steps {
        checkout([$class: 'GitSCM',
          branches: [[name: "${params.Branch}"]],
          doGenerateSubmoduleConfigurations: false,
          extensions: [], submoduleCfg: [],
          userRemoteConfigs: [[credentialsId: "${git_auth}", url: "${git_url}"]]
        ])
      }
    }
    stage('Compile code') {
      // compile the specified services
      steps {
        sh """
        mvn clean package -Dmaven.test.skip=true
        """
      }
    }
    stage('Build image') {
      steps {
        withCredentials([usernamePassword(credentialsId: "${harbor_registry_auth}", passwordVariable: 'password', usernameVariable: 'username')]) {
          sh """
          docker login -u ${username} -p '${password}' ${registry}
          for service in \$(echo ${Service} |sed 's/,/ /g'); do
            service_name=\${service%:*}
            image_name=${registry}/${project}/\${service_name}:${BUILD_NUMBER}
            cd \${service_name}
            if ls |grep biz &>/dev/null; then
              cd \${service_name}-biz
            fi
            docker build -t \${image_name} .
            docker push \${image_name}
            cd ${WORKSPACE}
          done
          """
          configFileProvider([configFile(fileId: "${k8s_auth}", targetLocation: "admin.kubeconfig")]){
            sh """
            # add the image pull secret (|| true so re-runs don't fail if it already exists)
            kubectl create secret docker-registry ${image_pull_secret} --docker-username=${username} --docker-password=${password} --docker-server=${registry} -n ${Namespace} --kubeconfig admin.kubeconfig || true
            # add the private chart repository
            helm repo add --username ${username} --password ${password} myrepo http://${registry}/chartrepo/${project}
            """
          }
        }
      }
    }
    stage('Deploy to K8s with Helm') {
      steps {
        sh """
        common_args="-n ${Namespace} --kubeconfig admin.kubeconfig"
        for service in \$(echo ${Service} |sed 's/,/ /g'); do
          service_name=\${service%:*}
          service_port=\${service#*:}
          image=${registry}/${project}/\${service_name}
          tag=${BUILD_NUMBER}
          helm_args="\${service_name} --set image.repository=\${image} --set image.tag=\${tag} --set replicaCount=${ReplicaCount} --set imagePullSecrets[0].name=${image_pull_secret} --set service.targetPort=\${service_port} myrepo/${Template}"
          # decide whether this is a fresh install or an upgrade
          if helm history \${service_name} \${common_args} &>/dev/null;then
            action=upgrade
          else
            action=install
          fi
          # enable ingress only for the edge services
          if [ \${service_name} == "gateway-service" ]; then
            helm \${action} \${helm_args} \
            --set ingress.enabled=true \
            --set ingress.host=${gateway_domain_name} \
            \${common_args}
          elif [ \${service_name} == "portal-service" ]; then
            helm \${action} \${helm_args} \
            --set ingress.enabled=true \
            --set ingress.host=${portal_domain_name} \
            \${common_args}
          else
            helm \${action} \${helm_args} \${common_args}
          fi
        done
        # check pod status
        sleep 10
        kubectl get pods \${common_args}
        """
      }
    }
  }
}
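For reference, the --set flags above imply roughly this values.yaml structure in the ms chart — an assumed sketch only; the actual chart may define more keys and different defaults:
replicaCount: 1
image:
  repository: ""
  tag: ""
imagePullSecrets: []
service:
  port: 80
  targetPort: 8080
ingress:
  enabled: false
  host: ""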
The first build will fail (the Git/Extended Choice parameters only show up after the job has run once); the second run may also fail at the Java/Maven compile step while the dependency cache is still warming up.
###############################################
Pitfall log: helm kept failing to deploy
I assumed it was a Helm problem, so I installed Helm on the second master and deployed a UI with it, and saw the Helm release succeed:
helm install ui stable/weave-scope
Then I went back and re-ran the Jenkins job, and the project deployed successfully — it felt like a caching issue.
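Rather than guessing, a failed release can be inspected directly with the same namespace and kubeconfig the pipeline uses (a sketch; substitute the failing pod's name):
helm ls -n ms --kubeconfig admin.kubeconfig
kubectl get pods -n ms --kubeconfig admin.kubeconfig
kubectl describe pod <failing-pod> -n ms --kubeconfig admin.kubeconfig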