1. Install nfs-server

k8s-master01 info (the machine providing NFS storage):
Public IP: 120.55.76.34
Private IP: 172.30.125.99

What the final NFS settings will look like:

nfs:
  server: 172.30.125.99
  path: /data/harbor

1.1 Run on the host that provides NFS storage (the master node by default)

yum install -y nfs-utils

echo "/data/harbor *(insecure,rw,sync,no_root_squash)" > /etc/exports

# Run the following commands: create the shared directories and start the nfs service
mkdir -p /data/harbor/{chartmuseum,jobservice,registry,database,redis,trivy}

# Run on the master
chmod -R 777 /data/harbor

# Reload the exports so the configuration takes effect
exportfs -r

# Check that the export is in effect
exportfs

systemctl enable rpcbind && systemctl start rpcbind

systemctl enable nfs && systemctl start nfs
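
To sanity-check the NFS server before moving on, confirm that both services are running and that the export is visible. A minimal check, assuming the defaults above:

# Both services should report active (running)
systemctl status rpcbind nfs --no-pager

# /data/harbor should appear in the export list; if it is empty, start the
# services first and then re-run exportfs -r
showmount -e localhost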

1.2 Configure the nfs-client (optional)

  • Configure the nfs-client on every node; 172.30.125.99 is the master's private IP address.
showmount -e 172.30.125.99

mkdir -p /data/harbor

mount -t nfs 172.30.125.99:/data/harbor /data/harbor
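
If the mount should survive a reboot, you can also add an fstab entry. A minimal sketch, assuming the same server and path as above:

# /etc/fstab entry so the share is remounted automatically on boot
172.30.125.99:/data/harbor  /data/harbor  nfs  defaults,_netdev  0  0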

2. Add the Helm repo

Install the helm tool
Releases page: https://github.com/helm/helm/releases

wget https://get.helm.sh/helm-v3.7.2-linux-amd64.tar.gz
tar -zxvf helm-v3.7.2-linux-amd64.tar.gz
# Extracting produces the directory linux-amd64
cd linux-amd64
cp helm /usr/local/bin/
helm version

With that, the helm tool is installed. Next, add the Harbor helm repo and download the chart package.

Releases page: https://github.com/goharbor/harbor-helm/releases

helm repo add harbor https://helm.goharbor.io
helm pull harbor/harbor --version 1.6.0
# The downloaded chart package is named harbor-1.6.0.tgz

tar zxvf harbor-1.6.0.tgz   # Extracts to a directory named harbor
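
Before editing, it can help to dump the chart's full default values for comparison. A quick way to do that, assuming the repo added above:

# Write the chart's default values.yaml to a file for reference
helm show values harbor/harbor --version 1.6.0 > harbor-default-values.yaml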

Edit harbor/values.yaml; the fields shown below need to be changed to match your environment.

As a reminder, k8s-master01 (the machine providing NFS storage) has public IP 120.55.76.34 and private IP 172.30.125.99. With NFS installed on that node, the storage settings become:
server: 172.30.125.99
path: /data/harbor

**Note: externalURL must use the cluster's internal IP address: externalURL: http://172.30.125.99:30002**

# Only the modified parameters are shown here; leave everything else at the chart defaults

expose:
  type: nodePort  
  
  tls:
    # We are using plain http, so change this to false
    enabled: false
    
externalURL: http://172.30.125.99:30002    # Be sure to change this to your own cluster IP, otherwise you will not be able to log in
persistence:
  enabled: true
  resourcePolicy: "keep"
  persistentVolumeClaim:  # Storage for each component. If you pre-created PVCs, set their names in existingClaim; for dynamic provisioning leave existingClaim empty (the StorageClass and provisioner YAML are given below)
    registry:
      # Use the existing PVC which must be created manually before bound,
      # and specify the "subPath" if the PVC is shared with other components
      existingClaim: ""
      # Specify the "storageClass" used to provision the volume. Or the default
      # StorageClass will be used(the default).
      # Set it to "-" to disable dynamic provisioning
      storageClass: "nfs-storage"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    chartmuseum:
      existingClaim: ""
      storageClass: "nfs-storage"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
    jobservice:
      existingClaim: ""
      storageClass: "nfs-storage"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    # If external database is used, the following settings for database will
    # be ignored
    database:
      existingClaim: ""
      storageClass: "nfs-storage"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    # If external Redis is used, the following settings for Redis will
    # be ignored
    redis:
      existingClaim: ""
      storageClass: "nfs-storage"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 1Gi
    trivy:
      existingClaim: ""
      storageClass: "nfs-storage"
      subPath: ""
      accessMode: ReadWriteOnce
      size: 5Gi
      
harborAdminPassword: "Harbor12345"  # Change the default login password
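
If you prefer not to edit values.yaml by hand, the same overrides can be passed on the command line at install time. A minimal sketch covering only the non-persistence settings above (the persistence keys can be set the same way):

helm install my-harbor ./harbor/ \
  --set expose.type=nodePort \
  --set expose.tls.enabled=false \
  --set externalURL=http://172.30.125.99:30002 \
  --set harborAdminPassword=Harbor12345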

Configure the default StorageClass
On the master, create the YAML for the dynamically provisioning default StorageClass; 172.30.125.99 is the master's IP address.

vim harbor-storage.yaml   # Paste the content below; remember to replace the IP address in the nfs server fields
## Create the StorageClass
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: harbor-data
parameters:
  archiveOnDelete: "true"  ## Whether to keep an archived copy of the PV contents when the PV is deleted

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #    limits:
          #      cpu: 10m
          #    requests:
          #      cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: harbor-data
            - name: NFS_SERVER
              value: 172.30.125.99 ## Your own NFS server address
            - name: NFS_PATH  
              value: /data/harbor  ## The directory shared by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.30.125.99
            path: /data/harbor
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
  
kubectl apply -f harbor-storage.yaml
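
After applying, verify that the StorageClass is registered as the default and that the provisioner pod is up. A quick check, assuming the names used above:

# nfs-storage should be listed and marked "(default)"
kubectl get storageclass

# The provisioner pod should be in the Running state
kubectl get pods -l app=nfs-client-provisioner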

3. Deploy the chart

[root@master01 ~]# helm install my-harbor ./harbor/   # optionally add --namespace harbor (create the namespace first, or also pass --create-namespace)
[root@master01 ~]# kubectl get po
NAME                                              READY   STATUS    RESTARTS       AGE
my-harbor-harbor-chartmuseum-648ddc6cc7-f6jf7     1/1     Running   3 (38m ago)    57m
my-harbor-harbor-core-787997f69-wwm8m             1/1     Running   4 (35m ago)    57m
my-harbor-harbor-database-0                       1/1     Running   3 (38m ago)    5h36m
my-harbor-harbor-jobservice-b6c898d8b-ktb9c       1/1     Running   4 (36m ago)    57m
my-harbor-harbor-nginx-5c7999cd9f-fxqwr           1/1     Running   3 (38m ago)    150m
my-harbor-harbor-notary-server-78bd56d784-vkdzd   1/1     Running   4 (38m ago)    57m
my-harbor-harbor-notary-signer-69bbf5b848-8f45n   1/1     Running   4 (38m ago)    57m
my-harbor-harbor-portal-7f965b49cd-hmhwc          1/1     Running   3 (38m ago)    5h36m
my-harbor-harbor-redis-0                          1/1     Running   3 (38m ago)    5h36m
my-harbor-harbor-registry-f566858b6-9q7df         2/2     Running   6 (38m ago)    57m
my-harbor-harbor-trivy-0                          1/1     Running   4 (35m ago)    5h36m
nfs-client-provisioner-659758485d-brdw7           1/1     Running   18 (38m ago)   9h
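
If any pod is stuck in Pending, it is usually the storage. You can confirm that every component's PVC was dynamically bound by the nfs-storage class:

# All PVCs should show STATUS Bound with STORAGECLASS nfs-storage
kubectl get pvc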

[root@master01 ~]# helm upgrade my-harbor ./harbor/  # apply changes after editing values.yaml
[root@master01 ~]# helm list -A    # list installed releases
[root@master01 ~]# helm repo list  # list configured repos
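
With expose.type set to nodePort, Harbor listens on NodePort 30002 by default. To confirm the port that was actually allocated:

# The harbor service should map the http port to nodePort 30002
kubectl get svc | grep harbor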

4. Work around the https access error (insecure registry)

Note: in http://172.30.125.99:30002, replace the IP with the IP of the server hosting Harbor. Apply this on every node that needs to pull from or push to the registry.

cat > /etc/docker/daemon.json << EOF
{
 "exec-opts":["native.cgroupdriver=systemd"],
 "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
 "insecure-registries": ["http://172.30.125.99:30002"]
}
EOF
systemctl daemon-reload
systemctl restart docker
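
To confirm Docker picked up the new daemon.json, assuming the configuration above:

# The registry should be listed under "Insecure Registries"
docker info | grep -A 3 "Insecure Registries"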

5. Access Harbor from inside the cluster

(Private IP: 172.30.125.99)

[root@master01 ~]# cat ./harbor/values.yaml |grep -i externalURL
externalURL: http://172.30.125.99:30002

[root@master01 ~]# docker login -u admin -p Harbor12345 http://172.30.125.99:30002
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
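
With the login working, you can verify the registry end to end by pushing a test image. A minimal sketch, assuming Harbor's default public project library exists:

docker pull busybox:latest
docker tag busybox:latest 172.30.125.99:30002/library/busybox:latest
docker push 172.30.125.99:30002/library/busybox:latest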

6. Access from a browser

(Public IP: 120.55.76.34)
http://120.55.76.34:30002
