1 Component Installation (Linux)

1.1 Go

# Download
cd /usr/local/src
sudo wget https://studygolang.com/dl/golang/go1.16.5.linux-amd64.tar.gz
sudo tar -xf go1.16.5.linux-amd64.tar.gz

# Configure environment variables (append the following to /etc/profile)
sudo vim /etc/profile
export GOROOT=/usr/local/src/go
export PATH=$GOROOT/bin:$PATH
export GOPATH=$HOME/goprojects
export GO111MODULE=auto
export GOPROXY=https://goproxy.cn,direct

source /etc/profile

# Create the GOPATH directories
mkdir -p $HOME/goprojects/{src,pkg}
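
A quick sanity check of the installation (a minimal sketch; the version string corresponds to the go1.16.5 tarball above):

go version                      # expected: go version go1.16.5 linux/amd64
go env GOROOT GOPATH GOPROXY    # confirm the variables from /etc/profile took effect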

1.2 Node.js

Node.js download page: https://nodejs.org/en/download/

# Download and extract
cd /usr/local/src
sudo wget https://nodejs.org/dist/v14.16.0/node-v14.16.0-linux-x64.tar.gz
sudo tar -xf node-v14.16.0-linux-x64.tar.gz
sudo mv node-v14.16.0-linux-x64 nodejs

# Create symlinks so node/npm are on the PATH
sudo ln -s /usr/local/src/nodejs/bin/node /usr/local/bin
sudo ln -s /usr/local/src/nodejs/bin/npm /usr/local/bin
sudo ln -s /usr/local/src/nodejs/bin/node /usr/bin
sudo ln -s /usr/local/src/nodejs/bin/npm /usr/bin

# Install cnpm v7.1.0
# If cnpm is already installed, check its version; versions 8.0 and above must be uninstalled first
sudo npm uninstall -g cnpm
sudo npm install -g cnpm@7.1.0 --registry=https://registry.npm.taobao.org

# Symlink cnpm as well
sudo ln -s /usr/local/src/nodejs/bin/cnpm /usr/local/bin
sudo ln -s /usr/local/src/nodejs/bin/cnpm /usr/bin
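
A quick check that the toolchain is on the PATH (a sketch; the versions correspond to the tarball and cnpm pin above):

node -v     # expected: v14.16.0
npm -v
cnpm -v     # should report 7.1.0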

2 Building and Packaging the Code

2.1 Compiling the Code

# 1. Fetch the source
cd $GOPATH/src

# Note: clone the repository as below; building from a directly downloaded zip archive will fail later
git clone -b release-v3.10.17 https://github.com/Tencent/bk-cmdb configcenter
cd configcenter

# 2. Build the frontend and backend together (cnpm must be < 8.0, otherwise the build fails)
make NPM=cnpm
# 3. Build the release package
make package
# Build output directory
${GOPATH}/src/configcenter/src/bin/pub/cmdb

Alternatively, build the frontend and backend separately:

# Build only the frontend
make ui NPM=cnpm
# Build only the backend
make server
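
If the build succeeds, the packaged output can be inspected before moving on (a sketch; exact contents vary by release, but init.py and the start/stop scripts used later should be present):

ls ${GOPATH}/src/configcenter/src/bin/pub/cmdb
# Expect the cmdb_* service directories plus init.py, start.sh, stop.sh, etc.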

3 Deploying cmdb on K8s

3.1 Redis

kind: ConfigMap
apiVersion: v1
metadata:
  name: redis-config
  namespace: bk-cmdb
  labels:
    app: redis
data:
  redis.conf: |-
    dir /data
    port 6379
    bind 0.0.0.0
    appendonly yes
    protected-mode no
    requirepass 123456
    pidfile /data/bk-cmdb/redis/redis-6379.pid
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis
  name: redis
  namespace: bk-cmdb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      nodeSelector:
        kubernetes.io/hostname: node04
      containers:
      - image: redis:6.2
        imagePullPolicy: IfNotPresent
        name: redis
        command:
        - "sh"
        - "-c"
        - "redis-server /usr/local/etc/redis/redis.conf"
        ports:
        - containerPort: 6379
        volumeMounts:
        - name: data
          mountPath: /data
        - name: config
          mountPath: /usr/local/etc/redis/redis.conf
          subPath: redis.conf
      volumes:
      - name: config
        configMap:
          name: redis-config
      - name: data
        hostPath:
          path: /data/bk-cmdb/redis/data
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis
  name: redis
  namespace: bk-cmdb
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
    nodePort: 30279
  selector:
    app: redis
  type: NodePort
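
Apply the manifest and verify connectivity through the NodePort (a minimal sketch, assuming the YAML above is saved as redis.yaml, the bk-cmdb namespace is created first, redis-cli is available on the client, and 172.16.0.100 is a node IP as used later in this article):

# Create the namespace once; all manifests in this section use it
kubectl create namespace bk-cmdb

kubectl apply -f redis.yaml
kubectl -n bk-cmdb get pods -l app=redis

# Authenticate with the password from redis.conf
redis-cli -h 172.16.0.100 -p 30279 -a 123456 ping    # expect: PONG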

3.2 Zookeeper

# Setup Service to provide access to Zookeeper for clients
apiVersion: v1
kind: Service
metadata:
  # DNS would be like zookeeper.zoons
  name: zookeeper
  labels:
    app: zookeeper
  namespace: bk-cmdb
spec:
  ports:
  - port: 2181
    name: client
    targetPort: 2181
    nodePort: 30181
  - port: 7000
    name: prometheus
    targetPort: 7000
    nodePort: 30700
  selector:
    app: zookeeper
    what: node
  type: NodePort
---
# Setup Headless Service for StatefulSet
apiVersion: v1
kind: Service
metadata:
  # DNS would be like zookeeper-0.zookeepers.etc
  name: zookeepers
  labels:
    app: zookeeper
  namespace: bk-cmdb
spec:
  ports:
    - port: 2888
      name: server
    - port: 3888
      name: leader-election
  clusterIP: None
  selector:
    app: zookeeper
    what: node
---
# Setup max number of unavailable pods in StatefulSet
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zookeeper-pod-disruption-budget
  namespace: bk-cmdb
spec:
  selector:
    matchLabels:
      app: zookeeper
  maxUnavailable: 1
---
# Setup Zookeeper StatefulSet
# Possible params:
# 1. replicas
# 2. memory
# 3. cpu
# 4. storage
# 5. storageClassName
# 6. user to run app
apiVersion: apps/v1
kind: StatefulSet
metadata:
  # nodes would be named as zookeeper-0, zookeeper-1, zookeeper-2
  name: zookeeper
  namespace: bk-cmdb
spec:
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeepers
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zookeeper
        what: node
      annotations:
        prometheus.io/port: '7000'
        prometheus.io/scrape: 'true'
    spec:
#      affinity:
#        podAntiAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            - labelSelector:
#                matchExpressions:
#                  - key: "app"
#                    operator: In
#                    values:
#                      - zookeeper
#              topologyKey: "kubernetes.io/hostname"
      nodeSelector:
        app: zookeeper
      containers:
        - name: kubernetes-zookeeper
          imagePullPolicy: IfNotPresent
          image: "zookeeper:3.6.3"
          resources:
            requests:
              memory: "512M"
              cpu: "1"
            limits:
              memory: "4Gi"
              cpu: "2"
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
            - containerPort: 7000
              name: prometheus
          # See those links for proper startup settings:
          # https://github.com/kow3ns/kubernetes-zookeeper/blob/master/docker/scripts/start-zookeeper
          # https://clickhouse.yandex/docs/en/operations/tips/#zookeeper
          # https://github.com/ClickHouse/ClickHouse/issues/11781
          command:
            - bash
            - -x
            - -c
            - |
              SERVERS=3 &&
              HOST=`hostname -s` &&
              DOMAIN=`hostname -d` &&
              CLIENT_PORT=2181 &&
              SERVER_PORT=2888 &&
              ELECTION_PORT=3888 &&
              PROMETHEUS_PORT=7000 &&
              ZOO_DATA_DIR=/var/lib/zookeeper/data &&
              ZOO_DATA_LOG_DIR=/var/lib/zookeeper/datalog &&
              {
                echo "clientPort=${CLIENT_PORT}"
                echo 'tickTime=2000'
                echo 'initLimit=300'
                echo 'syncLimit=10'
                echo 'maxClientCnxns=2000'
                echo 'maxSessionTimeout=60000000'
                echo "dataDir=${ZOO_DATA_DIR}"
                echo "dataLogDir=${ZOO_DATA_LOG_DIR}"
                echo 'autopurge.snapRetainCount=10'
                echo 'autopurge.purgeInterval=1'
                echo 'preAllocSize=131072'
                echo 'snapCount=3000000'
                echo 'leaderServes=yes'
                echo 'standaloneEnabled=false'
                echo '4lw.commands.whitelist=*'
                echo 'metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider'
                echo "metricsProvider.httpPort=${PROMETHEUS_PORT}"
              } > /conf/zoo.cfg &&
              {
                echo "zookeeper.root.logger=CONSOLE"
                echo "zookeeper.console.threshold=INFO"
                echo "log4j.rootLogger=\${zookeeper.root.logger}"
                echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender"
                echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}"
                echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout"
                echo "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n"
              } > /conf/log4j.properties &&
              echo 'JVMFLAGS="-Xms128M -Xmx4G -XX:+UseG1GC -XX:+CMSParallelRemarkEnabled"' > /conf/java.env &&
              if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
                  NAME=${BASH_REMATCH[1]}
                  ORD=${BASH_REMATCH[2]}
              else
                  echo "Failed to parse name and ordinal of Pod"
                  exit 1
              fi &&
              mkdir -p ${ZOO_DATA_DIR} &&
              mkdir -p ${ZOO_DATA_LOG_DIR} &&
              export MY_ID=$((ORD+1)) &&
              echo $MY_ID > $ZOO_DATA_DIR/myid &&
              for (( i=1; i<=$SERVERS; i++ )); do
                  echo "server.$i=$NAME-$((i-1)).$DOMAIN:$SERVER_PORT:$ELECTION_PORT" >> /conf/zoo.cfg;
              done &&
              chown -Rv zookeeper "$ZOO_DATA_DIR" "$ZOO_DATA_LOG_DIR" "$ZOO_LOG_DIR" "$ZOO_CONF_DIR" &&
              zkServer.sh start-foreground
          readinessProbe:
            exec:
              command:
                - bash
                - -c
                - "OK=$(echo ruok | nc 127.0.0.1 2181); if [[ \"$OK\" == \"imok\" ]]; then exit 0; else exit 1; fi"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - bash
                - -c
                - "OK=$(echo ruok | nc 127.0.0.1 2181); if [[ \"$OK\" == \"imok\" ]]; then exit 0; else exit 1; fi"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: data-dir-volume
              mountPath: /var/lib/zookeeper
      # Run as root so the chown of the data directories in the startup command succeeds
      securityContext:
        runAsUser: 0
        fsGroup: 0
  volumeClaimTemplates:
    - metadata:
        name: data-dir-volume
      spec:
        # Replace with your own StorageClass
        storageClassName: nfs-storage
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 25Gi
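
The nodeSelector above only schedules pods onto nodes labeled app=zookeeper, so label the target nodes before applying (a sketch; the node names and manifest filename are placeholders for your environment):

# Label the nodes that should host ZooKeeper
kubectl label node node01 node02 node03 app=zookeeper

kubectl apply -f zookeeper.yaml
kubectl -n bk-cmdb get pods -l app=zookeeper

# Four-letter-word health check through the client NodePort
echo ruok | nc 172.16.0.100 30181    # expect: imok
echo stat | nc 172.16.0.100 30181    # shows whether the node is leader or follower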

3.3 MongoDB

apiVersion: v1
kind: ConfigMap
metadata:
  name: mongodb-rs-cm
  namespace: bk-cmdb
data:
  keyfile: |
        dGhpcyBpcyBycyBzdXBlciBzZWNyZXQga2V5Cg==
  mongod_rs.conf: |+
    systemLog:
      destination: file
      logAppend: true
      path: /data/mongod.log
    storage:
      dbPath: /data
      journal:
        enabled: true
      directoryPerDB: true
      wiredTiger:
        engineConfig:
          cacheSizeGB: 2
          directoryForIndexes: true
    processManagement:
      fork: true
      pidFilePath: /data/mongod.pid
    net:
      port: 27017
      bindIp: 0.0.0.0
      maxIncomingConnections: 5000
    security:
      keyFile: /data/configdb/keyfile
      authorization: enabled
    replication:
      oplogSizeMB: 1024
      replSetName: rs0    
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongodb-rs
  namespace: bk-cmdb
spec:
  serviceName: mongodb-rs
  replicas: 3
  selector:
    matchLabels:
      app: mongodb-rs
  template:
    metadata:
      labels:
        app: mongodb-rs
    spec:
      containers:
      - name: mongo
        image: mongo:4.4.1
        ports:
        - containerPort: 27017
          name: client
        command: ["sh"]
        args:
        - "-c"
        - |
          set -ex
          mongod --config /data/configdb/mongod_rs.conf
          sleep infinity              
        env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
        volumeMounts:
        - name: conf
          mountPath: /data/configdb
          readOnly: false
        - name: data
          mountPath: /data
          readOnly: false
      volumes:
      - name: conf
        configMap:
          name: mongodb-rs-cm
          defaultMode: 0600
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 80Gi
      storageClassName: nfs-storage
---
apiVersion: v1
kind: Service
metadata:
  name: mongodb-rs
  labels:
    app: mongodb-rs
  namespace: bk-cmdb
spec:
  ports:
    - port: 27017
      targetPort: 27017
      nodePort: 30717
  selector:
    app: mongodb-rs
  type: NodePort
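
Apply the manifests and wait until all three members are running before initializing the replica set (a sketch; the filename is assumed):

kubectl apply -f mongodb.yaml
kubectl -n bk-cmdb rollout status statefulset/mongodb-rs
kubectl -n bk-cmdb get pods -l app=mongodb-rs -o wide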

Initialize MongoDB:

# Create the cmdb database and users
# 1. Enter the container
kubectl exec -it mongodb-rs-0 -n bk-cmdb -- bash

# 2. Open the mongo shell
mongo

# 3. Initialize the replica set
# Each member address follows the fixed pattern:
#     <pod-name>.<service-name>.<namespace>.svc.cluster.local
# e.g. mongodb-rs-0.mongodb-rs.bk-cmdb.svc.cluster.local

config = { _id:"rs0", members:[
         {_id:0,host:"mongodb-rs-0.mongodb-rs.bk-cmdb.svc.cluster.local:27017",priority:90}, 
         {_id:1,host:"mongodb-rs-1.mongodb-rs.bk-cmdb.svc.cluster.local:27017",priority:80}, 
         {_id:2,host:"mongodb-rs-2.mongodb-rs.bk-cmdb.svc.cluster.local:27017",priority:70}
    ]
}
rs.initiate(config)
# Check the replica set status
rs.status()

# 4. Create the admin account
use admin
db.createUser({user:"admin", pwd:"123456", roles:[{role: "userAdminAnyDatabase", db:"admin" }]})
db.auth('admin','123456')
db.grantRolesToUser("admin", ["clusterAdmin"])

# 5. Create the application user (used by bk-cmdb)
use cmdb
db.createUser({user: "cc",pwd: "cc",roles: [ { role: "readWrite", db: "cmdb" },{ role: "readWrite", db: "monstache" } ]})
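
To confirm the new credentials work from outside the cluster, connect once through the NodePort (a sketch, assuming a mongo shell on the client and the node IP used elsewhere in this article):

mongo 172.16.0.100:30717/cmdb -u cc -p cc --authenticationDatabase cmdb --eval 'db.runCommand({ping: 1})'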

3.4 Elasticsearch

apiVersion: v1
kind: ConfigMap
metadata:
  name: es-7
  namespace: bk-cmdb
data:
  elasticsearch.yml: |
    node.name: master
    cluster.initial_master_nodes: ["master"]
    network.host: 0.0.0.0
    path.data: /data
    http.cors.enabled: true
    http.cors.allow-origin: "*"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch7-single
  namespace: bk-cmdb
spec:
  replicas: 1
  selector:
    matchLabels:
      name: elasticsearch7
  template:
    metadata:
      labels:
        name: elasticsearch7
    spec:
      nodeSelector:
        kubernetes.io/hostname: node07
      initContainers:
      - name: init-sysctl
        image: busybox:stable
        imagePullPolicy: IfNotPresent
        command:
        - sysctl
        - -w
        - vm.max_map_count=655360
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        image: elasticsearch:7.5.2
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9200
        - containerPort: 9300
        volumeMounts:
        - name: es-data
          mountPath: /data
        - name: es-plugins
          mountPath: /usr/share/elasticsearch/plugins
        - name: es-conf
          mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
          subPath: elasticsearch.yml
      volumes:
      - name: es-data
        hostPath:
          path: /data/bk-cmdb/elasticsearch/data
      - name: es-plugins
        hostPath:
          path: /data/bk-cmdb/elasticsearch/plugins
      - name: es-conf
        configMap:
          name: es-7
---
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch7-single
  namespace: bk-cmdb
  labels:
    name: elasticsearch7
spec:
  type: NodePort
  ports:
  - port: 9200
    targetPort: 9200
    nodePort: 30920
    name: elasticsearch7-single1
  - port: 9300
    targetPort: 9300
    nodePort: 30930
    name: elasticsearch7-single2
  selector:
    name: elasticsearch7
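
Apply and check cluster health through the NodePort (a sketch; the filename is assumed, and the hostPath directories on node07 must exist and be writable by the elasticsearch user):

kubectl apply -f elasticsearch.yaml
kubectl -n bk-cmdb get pods -l name=elasticsearch7

curl "http://172.16.0.100:30920/_cluster/health?pretty"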

3.5 Deploying cmdb

The Dockerfile.product file is as follows:

FROM centos:7

# copy to bin directory
RUN mkdir -p /data/bin/
COPY bk-cmdb /data/bin/bk-cmdb/

Build the Docker image:

# 1. Copy the build artifacts
cd ${GOPATH}/src/configcenter/helm/image
cp -a ${GOPATH}/src/configcenter/src/bin/pub/cmdb bk-cmdb

# 2. Generate the configuration files
# --discovery: ZooKeeper address
# --database: MongoDB database name (cmdb)
# --blueking_cmdb_url: URL through which cmdb is reached from outside
# --user_info: account and password separated by a colon; multiple users are comma-separated, e.g. admin:123456,songqi:123456,pcj:123456
cd bk-cmdb
python init.py  \
      --discovery          172.16.0.100:30181 \
      --database           cmdb \
      --redis_ip           172.16.0.100 \
      --redis_port         30279 \
      --redis_pass         123456 \
      --mongo_ip           172.16.0.100 \
      --mongo_port         30717 \
      --mongo_user         cc \
      --mongo_pass         cc \
      --blueking_cmdb_url  http://172.16.0.28:8083/ \
      --listen_port        8083 \
      --es_url             http://172.16.0.100:30920 \
      --log_level          3 \
      --user_info          admin:123456

# 3. After generating the configuration in step 2, convert the shell and python scripts to Unix line endings with dos2unix
find . -name "*.sh" | xargs dos2unix && find . -name "*.py" | xargs dos2unix

Note when building the image: the host directory containing the cmdb files must be the same as the directory they will occupy inside the container. With the Dockerfile.product above, that means the image has to be built from /data/bin/ on the host.

# 4. Build and push the image
[root@node05 bin]# pwd
/data/bin
[root@node05 bin]# ls -l
total 8
drwxrwxr-x 21 1001 1001 4096 Jul  9 16:22 bk-cmdb
-rw-r--r--  1 root root   94 Jul  9 11:02 Dockerfile.product
[root@node05 bin]# docker build . -t 172.16.0.28:9000/cmdb/bk-cmdb:v3.10.17 -f Dockerfile.product --no-cache
[root@node05 bin]# docker push 172.16.0.28:9000/cmdb/bk-cmdb:v3.10.17
bk-cmdb.yaml is shown below; deploy it with kubectl apply -f bk-cmdb.yaml. For now the cmdb services are not split into separate Deployments, and the container is kept alive with a long sleep as a stand-in for a proper daemon; this can be refined later.

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: bk-cmdb
  name: bk-cmdb
  namespace: bk-cmdb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: bk-cmdb
  template:
    metadata:
      labels:
        app: bk-cmdb
    spec:
      nodeSelector:
        kubernetes.io/hostname: node05
      containers:
      - image: 172.16.0.28:9000/cmdb/bk-cmdb:v3.10.17
        imagePullPolicy: IfNotPresent
        name: bk-cmdb
        command:
        - "sh"
        - "-c"
        - "/data/bin/bk-cmdb/start.sh && sleep 1000000000000"
        ports:
        - containerPort: 8083
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: bk-cmdb
  name: bk-cmdb
  namespace: bk-cmdb
spec:
  ports:
  - port: 8083
    protocol: TCP
    targetPort: 8083
    nodePort: 30800
  selector:
    app: bk-cmdb
  type: NodePort
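
Apply the manifest and confirm the all-in-one container starts (a sketch; pod names will differ in your environment):

kubectl apply -f bk-cmdb.yaml
kubectl -n bk-cmdb get pods -l app=bk-cmdb

# Check that start.sh brought the cmdb processes up
kubectl -n bk-cmdb logs deploy/bk-cmdb | tail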

Because the MongoDB version is newer than 3.6, init_db.sh must be run manually after bk-cmdb is deployed:

kubectl exec -it bk-cmdb-676b7b568-v79rm -n bk-cmdb -- bash
cd /data/bin/bk-cmdb && ./init_db.sh
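
After init_db.sh finishes, the web UI should be reachable through the Service NodePort (a sketch; replace the node IP with one of your own nodes, or use the blueking_cmdb_url configured earlier):

curl -I http://172.16.0.100:30800/
# Then log in from a browser with a user from --user_info, e.g. admin / 123456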
