1. Environment

OS: Linux

4 servers

A Kubernetes (k8s) cluster

An NFS shared directory

2. Create the namespace

kubectl create namespace minio-system

3. Create a StorageClass for dynamic volume provisioning

Create the shared directory on the NFS server first, as shown below.
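A minimal sketch of preparing and exporting the directory on the NFS server (the path matches the manifest below; the export options are only an example and may need adjusting for your environment):

mkdir -p /nfs/data/minio
echo "/nfs/data/minio *(rw,sync,no_root_squash)" >> /etc/exports
exportfs -r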

For the provisioner image, just run docker search nfs-client-provisioner; if your cluster pulls from a private registry, retag and push the image there, for example:
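A hedged example, assuming the commonly used quay.io/external_storage/nfs-client-provisioner image and the harbor.domain.com/library registry path referenced in the Deployment below:

docker pull quay.io/external_storage/nfs-client-provisioner:latest
docker tag quay.io/external_storage/nfs-client-provisioner:latest harbor.domain.com/library/nfs-client-provisioner:latest
docker push harbor.domain.com/library/nfs-client-provisioner:latest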

storageclass.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
  namespace: minio-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner-minio
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner-minio
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: minio-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner-minio
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner-minio
  namespace: minio-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner-minio
  namespace: minio-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: minio-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner-minio
  apiGroup: rbac.authorization.k8s.io
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: minio-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: harbor.domain.com/library/nfs-client-provisioner:latest # image name and tag
          imagePullPolicy: IfNotPresent # use the local image if it is already present instead of always pulling
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: minio-data
            - name: NFS_SERVER
              value: 192.168.1.1
            - name: NFS_PATH
              value: /nfs/data/minio
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.1
            path: /nfs/data/minio
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: minio-data
  annotations:
    storageclass.kubernetes.io/is-default-class: "false" # set to "true" to make this the default StorageClass
provisioner: minio-data    # dynamic provisioner name; must match the PROVISIONER_NAME env value set in the Deployment above
parameters:
  archiveOnDelete: "true"  # "false": data is removed when the PVC is deleted; "true": data is archived and kept
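Apply the manifest and verify that the provisioner pod is running and the StorageClass exists:

kubectl apply -f storageclass.yaml
kubectl get pods -n minio-system
kubectl get storageclass minio-data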

4. Deploy MinIO

The image used is minio/minio.

minio.yaml

apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: minio-system
  name: minio
spec:
  serviceName: minio
  replicas: 4
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
      - name: minio
        env:
        - name: MINIO_ROOT_USER
          value: "admin"
        - name: MINIO_ROOT_PASSWORD
          value: "admin123456"
        image: minio/minio:latest
        imagePullPolicy: IfNotPresent
        command:
          - /bin/sh
          - -c
          - minio server --console-address ":5000" http://minio-{0...3}.minio.minio-system.svc.cluster.local/data
        ports:
        - name: data
          containerPort: 9000
          protocol: "TCP"
        - name: console
          containerPort: 5000
          protocol: "TCP"
        volumeMounts:
        - name: data
          mountPath: /data
        - name: date-config
          mountPath: /etc/localtime
      volumes:
        - name: date-config
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 10Gi
      storageClassName: minio-data
 
---
 
apiVersion: v1
kind: Service
metadata:
  namespace: minio-system
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
    - port: 9000
      name: data
    - port: 5000
      name: console
  selector:
    app: minio
---
apiVersion: v1
kind: Service
metadata:
  namespace: minio-system
  name: minio-service
spec:
  type: NodePort
  ports:
   - name: data
     port: 9000
     targetPort: 9000
     protocol: TCP
     nodePort: 30201
   - name: console
     port: 5000
     targetPort: 5000
     protocol: TCP
     nodePort: 30202
  selector:
    app: minio
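Apply the manifest and check that all four MinIO pods start and that each one gets a PVC bound through the minio-data StorageClass:

kubectl apply -f minio.yaml
kubectl get pods -n minio-system -l app=minio
kubectl get pvc -n minio-system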
 
 

5. Test

Open http://<node-ip>:30202 in a browser and log in with the account and password configured above (admin / admin123456). The S3 API itself is exposed on NodePort 30201; a quick liveness check is shown below.
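A simple reachability check against the S3 endpoint (replace <node-ip> with one of your node addresses; /minio/health/live is MinIO's liveness endpoint and returns HTTP 200 when the server is up):

curl -i http://<node-ip>:30201/minio/health/live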

6. Java integration

6.1 Add the dependencies

        <!-- MinIO client -->
        <dependency>
            <groupId>io.minio</groupId>
            <artifactId>minio</artifactId>
            <version>8.4.5</version>
        </dependency>
        <!-- progress bar -->
        <dependency>
            <groupId>me.tongfei</groupId>
            <artifactId>progressbar</artifactId>
            <version>0.5.3</version>
        </dependency>
        <!-- HTTP file upload (OkHttp) -->
        <dependency>
            <groupId>com.squareup.okhttp3</groupId>
            <artifactId>okhttp</artifactId>
            <version>4.10.0</version>
        </dependency>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.13.2</version>
            <scope>test</scope>
        </dependency>

6.2 Upload and download

import io.minio.BucketExistsArgs;
import io.minio.DownloadObjectArgs;
import io.minio.MakeBucketArgs;
import io.minio.MinioClient;
import io.minio.UploadObjectArgs;
import org.junit.Test;

/**
 * https://min.io/docs/minio/linux/developers/java/API.html
 */
public class Demo {

    /**
     * Upload a file
     */
    @Test
    public void test1() {
        try {
            MinioClient client =
                    MinioClient.builder()
                            .endpoint("http://192.168.1.1:30201/")
                            .credentials("admin", "admin123456")
                            .build();
            String bucketName = "demo";

            // Create the bucket if it does not exist yet
            boolean found = client.bucketExists(BucketExistsArgs.builder().bucket(bucketName).build());
            if (!found) {
                client.makeBucket(MakeBucketArgs.builder().bucket(bucketName).build());
            } else {
                System.out.println("Bucket demo already exists");
            }

            // Upload a local file as object "dns.log"
            client.uploadObject(UploadObjectArgs.builder()
                    .bucket(bucketName)
                    .object("dns.log")
                    .filename("D:\\log\\dns.log")
                    .build());
            System.out.println("Upload succeeded");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Download a file
     */
    @Test
    public void test2() {
        try {
            MinioClient client =
                    MinioClient.builder()
                            .endpoint("http://192.168.1.1:30201/")
                            .credentials("admin", "admin123456")
                            .build();
            String bucketName = "demo";

            // Create the bucket if it does not exist yet
            boolean found = client.bucketExists(BucketExistsArgs.builder().bucket(bucketName).build());
            if (!found) {
                client.makeBucket(MakeBucketArgs.builder().bucket(bucketName).build());
            } else {
                System.out.println("Bucket demo already exists");
            }

            // Download object "dns.log" to a local file
            client.downloadObject(
                    DownloadObjectArgs.builder()
                            .bucket(bucketName)
                            .object("dns.log")
                            .filename("dns.log")
                            .build());
            System.out.println("Download succeeded");
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}

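Beyond plain upload and download, the same SDK can hand out time-limited download links so that callers never see the root credentials. The sketch below reuses the endpoint, credentials, bucket and object from the examples above; the class name PresignDemo and the one-hour expiry are illustrative choices.

import io.minio.GetPresignedObjectUrlArgs;
import io.minio.MinioClient;
import io.minio.http.Method;

import java.util.concurrent.TimeUnit;

public class PresignDemo {

    public static void main(String[] args) throws Exception {
        MinioClient client =
                MinioClient.builder()
                        .endpoint("http://192.168.1.1:30201/")
                        .credentials("admin", "admin123456")
                        .build();

        // Generate a GET URL for "dns.log" in bucket "demo" that stays valid for one hour
        String url = client.getPresignedObjectUrl(
                GetPresignedObjectUrlArgs.builder()
                        .method(Method.GET)
                        .bucket("demo")
                        .object("dns.log")
                        .expiry(1, TimeUnit.HOURS)
                        .build());
        System.out.println(url);
    }
}

Anyone with the printed URL can fetch the object with a normal HTTP GET until the link expires.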