k8s 搭建可持久化的 Hadoop 集群
创建service和configmap
vim hadoop.yaml
# ConfigMap holding the service names the kubeguide/hadoop entrypoint reads
# to render core-site.xml / yarn-site.xml at container start.
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-hadoop-conf
  namespace: dev
data:
  HDFS_MASTER_SERVICE: hadoop-hdfs-master
  # NOTE(review): "HDOOP" is a typo, but the kubeguide/hadoop image reads this
  # exact variable name — keep it unless the image itself is fixed.
  HDOOP_YARN_MASTER: hadoop-yarn-master
---
# NodePort Service for the HDFS NameNode: RPC on 9000 (in-cluster),
# web UI on 50070 exposed outside the cluster via node port 32007.
apiVersion: v1
kind: Service
metadata:
  name: hadoop-hdfs-master
  namespace: dev
spec:
  type: NodePort
  selector:
    name: hdfs-master
  ports:
    - name: rpc
      port: 9000
      targetPort: 9000
    - name: http
      port: 50070
      targetPort: 50070
      nodePort: 32007
---
# NodePort Service for the YARN ResourceManager: scheduler (8030),
# resource-tracker (8031) and admin/client (8032) ports for in-cluster use;
# web UI (8088) exposed outside via node port 32088. Unnamed nodePorts for
# 8030-8032 are auto-assigned by Kubernetes.
apiVersion: v1
kind: Service
metadata:
  name: hadoop-yarn-master
  namespace: dev
spec:
  type: NodePort
  selector:
    name: yarn-master
  ports:
    - name: "8030"
      port: 8030
    - name: "8031"
      port: 8031
    - name: "8032"
      port: 8032
    - name: http
      port: 8088
      targetPort: 8088
      nodePort: 32088
---
# Headless Service (clusterIP: None) giving each yarn-node StatefulSet pod a
# stable DNS name; port 8040 is the NodeManager localizer port.
apiVersion: v1
kind: Service
metadata:
  name: yarn-node
  namespace: dev
spec:
  clusterIP: None
  selector:
    name: yarn-node
  ports:
    - port: 8040
创建hdfs集群,使用nfs作为存储
vim hadoop-datanode.yaml
# Single-replica HDFS NameNode. The pod label `name: hdfs-master` is what the
# hadoop-hdfs-master Service selects on.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hdfs-master
  namespace: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      name: hdfs-master
  template:
    metadata:
      labels:
        name: hdfs-master
    spec:
      containers:
        - name: hdfs-master
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9000   # HDFS RPC
            - containerPort: 50070  # NameNode web UI
          env:
            # Tells the image's entrypoint to start as a NameNode.
            - name: HADOOP_NODE_TYPE
              value: namenode
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            # "HDOOP" typo is intentional — the image reads this exact name.
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
---
# Three HDFS DataNodes with per-pod persistent storage. volumeClaimTemplates
# creates one PVC per replica from the "nfs-storage" StorageClass; both mounts
# share a single PVC via subPath.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: hadoop-datanode
  namespace: dev
spec:
  replicas: 3
  selector:
    matchLabels:
      name: hadoop-datanode
  serviceName: hadoop-datanode
  template:
    metadata:
      labels:
        name: hadoop-datanode
    spec:
      containers:
        - name: hadoop-datanode
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9000
            - containerPort: 50070
          volumeMounts:
            - name: data
              mountPath: /root/hdfs/
              subPath: hdfs
            - name: data
              mountPath: /usr/local/hadoop/logs/
              subPath: logs
          env:
            # Tells the image's entrypoint to start as a DataNode.
            - name: HADOOP_NODE_TYPE
              value: datanode
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            # "HDOOP" typo is intentional — the image reads this exact name.
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
  volumeClaimTemplates:
    # No namespace here: PVCs from a volumeClaimTemplate are always created in
    # the StatefulSet's own namespace (dev); the original `namespace: default`
    # was contradictory and is ignored/rejected by the API server.
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: 2Gi
        storageClassName: "nfs-storage"
创建yarn集群
vim yarn-node.yaml
# Single-replica YARN ResourceManager. The pod label `name: yarn-master` is
# what the hadoop-yarn-master Service selects on.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: yarn-master
  namespace: dev
spec:
  replicas: 1
  selector:
    matchLabels:
      name: yarn-master
  template:
    metadata:
      labels:
        name: yarn-master
    spec:
      containers:
        - name: yarn-master
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          # Fixed: the original listed 9000/50070, which are NameNode ports
          # copied by mistake. A ResourceManager serves the ports below,
          # matching the hadoop-yarn-master Service definition.
          ports:
            - containerPort: 8030  # scheduler
            - containerPort: 8031  # resource tracker
            - containerPort: 8032  # client/admin
            - containerPort: 8088  # web UI
          env:
            # Tells the image's entrypoint to start as the ResourceManager.
            - name: HADOOP_NODE_TYPE
              value: resourceman
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            # "HDOOP" typo is intentional — the image reads this exact name.
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
---
# Three YARN NodeManagers with per-pod persistent storage; serviceName binds
# the pods to the headless yarn-node Service for stable DNS identities.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: yarn-node
  namespace: dev
spec:
  replicas: 3
  selector:
    matchLabels:
      name: yarn-node
  serviceName: yarn-node
  template:
    metadata:
      labels:
        name: yarn-node
    spec:
      containers:
        - name: yarn-node
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8040  # localizer
            - containerPort: 8041  # NodeManager IPC
            - containerPort: 8042  # NodeManager web UI
          volumeMounts:
            - name: yarn-data
              mountPath: /root/hdfs/
              subPath: hdfs
            - name: yarn-data
              mountPath: /usr/local/hadoop/logs/
              subPath: logs
          env:
            # Tells the image's entrypoint to start as a NodeManager.
            - name: HADOOP_NODE_TYPE
              value: yarnnode
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            # "HDOOP" typo is intentional — the image reads this exact name.
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
  volumeClaimTemplates:
    # No namespace here: PVCs from a volumeClaimTemplate are always created in
    # the StatefulSet's own namespace (dev); the original `namespace: default`
    # was contradictory and is ignored/rejected by the API server.
    - metadata:
        name: yarn-data
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: 2Gi
        storageClassName: "nfs-storage"
依次执行 kubectl apply -f hadoop.yaml、kubectl apply -f hadoop-datanode.yaml、kubectl apply -f yarn-node.yaml 应用以上三个 yaml 文件
打开 http://<任一节点IP>:32007 和 http://<任一节点IP>:32088 即可分别看到 HDFS（NameNode）和 YARN（ResourceManager）的管理界面
更多推荐
已为社区贡献2条内容
所有评论(0)