Flink on YARN Mode Configuration
Introduction
Flink relies on YARN for high availability; YARN depends on Hadoop, and Hadoop in turn depends on the JDK, so each of them is installed below.
Prepare three machines:
1.1.1.1 node1
1.1.1.2 node2
1.1.1.3 node3
I. Install the JDK
1. Download and extract
tar -xvf jdk-8u271-linux-x64.tar.gz -C /usr/local
mv /usr/local/jdk1.8.0_271 /usr/local/jdk
2. Configure environment variables
export JAVA_HOME=/usr/local/jdk
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
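These exports are assumed to be appended to /etc/profile on every node (any login shell profile works); reload it before verifying:
# reload the profile so the new JAVA_HOME takes effect in the current shell
source /etc/profile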
3. Verify
java -version
II. Install ZooKeeper
1. Download and extract
tar -xvf apache-zookeeper-3.5.9-bin.tar.gz -C /usr/local
mv /usr/local/apache-zookeeper-3.5.9-bin /usr/local/zookeeper
2. Change the owner and group
chown -R root:root zookeeper/
3. Configure environment variables
4. Edit the configuration file
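The original does not list the variables; a minimal sketch, following the same pattern as the JDK section (paths assume the install location above):
export ZOOKEEPER_HOME=/usr/local/zookeeper
export PATH=$PATH:$ZOOKEEPER_HOME/bin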
cp zoo_sample.cfg zoo.cfg
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
# do not use /tmp for storage, /tmp here is just
# example sakes.
dataDir=/usr/local/zookeeper/tmp/data/zookeeper
dataLogDir=/usr/local/zookeeper/tmp/log/zookeeper
# the port at which the clients will connect
clientPort=2181
autopurge.purgeInterval=1
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
# Note: the 1 in server.1 is the server id and must match the id in that node's myid file
# Repeat the steps above on every node
5. Set the server id
mkdir -p /usr/local/zookeeper/tmp/data/zookeeper
touch /usr/local/zookeeper/tmp/data/zookeeper/myid
echo 1 > /usr/local/zookeeper/tmp/data/zookeeper/myid
# on node2 write 2, on node3 write 3
6. Start the server
zkServer.sh start
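Once the service is started on all three nodes, you can optionally check each node's role; one node should report leader and the other two follower:
zkServer.sh status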
7. Connect a client
zkCli.sh -server node1:2181
III. Install Hadoop
1. Configure /etc/hosts with hostname-to-IP mappings; update it on every machine
vi /etc/hosts
Add the following entries:
1.1.1.1 node1
1.1.1.2 node2
1.1.1.3 node3
2. Configure passwordless SSH login
ssh-keygen
ssh-copy-id node2
ssh-copy-id node3
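Repeat the key generation and copy on every node (including copying to node1 itself), since the fencing mechanism and the start scripts SSH in both directions; a quick check that no password prompt appears (hostnames as defined above):
ssh-copy-id node1
# should print the remote hostname without asking for a password
ssh node2 hostname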
3. Extract the Hadoop package
tar -xvf hadoop-2.10.1.tar.gz -C /usr/local
mv hadoop-2.10.1 hadoop
4. Configure environment variables
export HADOOP_HOME=/usr/local/hadoop
export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin
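The daemon scripts used later (hadoop-daemon.sh, start-dfs.sh, yarn-daemon.sh) live under $HADOOP_HOME/sbin; if you want to call them without full paths, extend PATH as well (an optional addition, not part of the original):
export PATH=$PATH:$HADOOP_HOME/sbin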
5. Configure the HDFS cluster (the following files are under /usr/local/hadoop/etc/hadoop)
1. hadoop-env.sh
Add the JDK path:
export JAVA_HOME=/usr/local/jdk
2. core-site.xml
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/usr/local/hadoop/data/hdfs/tmp</value>
<description>A base for other temporary directories.</description>
</property>
<!-- Read/write buffer size for SequenceFiles, in bytes; 131072 bytes = 128 KB -->
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns</value>
</property>
<!-- Allow the root user, from any host, to proxy any group of users -->
<property>
<name>hadoop.proxyuser.root.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.root.groups</name>
<value>*</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/hadoop/data/hdfs/journal</value>
</property>
<!-- ZooKeeper quorum used for HA -->
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
</configuration>
3. hdfs-site.xml
<configuration>
<property>
<!-- Number of replicas -->
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<!-- HDFS block size in bytes; 134217728 bytes = 128 MB -->
<name>dfs.block.size</name>
<value>134217728</value>
</property>
<property>
<!-- Local path where the NameNode stores its metadata -->
<name>dfs.namenode.name.dir</name>
<value>file:///usr/local/hadoop/data/hdfs/namenode</value>
</property>
<property>
<!-- Local path where the DataNode stores blocks -->
<name>dfs.datanode.data.dir</name>
<value>file:///usr/local/hadoop/data/hdfs/datanode</value>
</property>
<property>
<!-- Local path where the NameNode stores its edit logs -->
<name>dfs.namenode.edits.dir</name>
<value>file:///usr/local/hadoop/data/hdfs/nn/edits</value>
</property>
<!-- Nameservice (cluster) name -->
<!-- The HDFS nameservice is ns; it must match fs.defaultFS in core-site.xml -->
<property>
<name>dfs.nameservices</name>
<value>ns</value>
</property>
<!-- The nameservice ns has two NameNodes, nn1 and nn2 -->
<property>
<name>dfs.ha.namenodes.ns</name>
<value>nn1,nn2</value>
</property>
<!-- RPC address of nn1 -->
<property>
<name>dfs.namenode.rpc-address.ns.nn1</name>
<value>node1:9000</value>
</property>
<!-- HTTP address of nn1 -->
<property>
<name>dfs.namenode.http-address.ns.nn1</name>
<value>node1:50070</value>
</property>
<!-- RPC address of nn2 -->
<property>
<name>dfs.namenode.rpc-address.ns.nn2</name>
<value>node2:9000</value>
</property>
<!-- HTTP address of nn2 -->
<property>
<name>dfs.namenode.http-address.ns.nn2</name>
<value>node2:50070</value>
</property>
<!-- Location of the NameNode's shared edits on the JournalNodes -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://node1:8485;node2:8485;node3:8485/ns</value>
</property>
<!-- HTTP address of the SecondaryNameNode (unused when HA is enabled) -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>node1:9001</value>
</property>
<!-- Enable WebHDFS so that all WebHDFS operations can be issued directly against the NameNode address -->
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<!-- Local path where the JournalNodes store their data -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/usr/local/hadoop/data/hdfs/journaldata</value>
</property>
<!-- Enable automatic NameNode failover -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Class that clients use to locate the active NameNode when a failover occurs -->
<property>
<name>dfs.client.failover.proxy.provider.ns</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fencing method -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- sshfence requires passwordless SSH; point to the private key -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>~/.ssh/id_rsa</value>
</property>
<!-- Connect timeout for the sshfence method, in milliseconds -->
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>30000</value>
</property>
<!-- Whether to check permissions when users access files -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
</configuration>
4. mapred-site.xml
<configuration>
<!-- Use YARN as the execution framework -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- JobHistory server address -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>node1:10200</value>
</property>
<!-- JobHistory web UI address -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>node1:19888</value>
</property>
</configuration>
6. Configure the YARN cluster
yarn-site.xml
<configuration>
<!-- Auxiliary service run on the NodeManager; MapReduce jobs cannot run unless this is set to mapreduce_shuffle -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>ns</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>node1</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>node2</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm1</name>
<value>node1:8088</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address.rm2</name>
<value>node2:8088</value>
</property>
<property>
<name>yarn.resourcemanager.recovery.enabled</name>
<value>true</value>
</property>
<!-- ZooKeeper-based state store for ResourceManager HA -->
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- Retain aggregated logs for 7 days -->
<property>
<name>yarn.log-aggregation-retain-seconds</name>
<value>604800</value>
</property>
<!-- When ZooKeeper is used as the state store, the address of the ZooKeeper quorum -->
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>node1:2181,node2:2181,node3:2181</value>
</property>
<!-- Memory (MB) available to the NodeManager; must be at least 1024 or the NodeManager will not start -->
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>4096</value>
</property>
<!-- Disable YARN memory checks -->
<property>
<name>yarn.nodemanager.vmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.nodemanager.pmem-check-enabled</name>
<value>false</value>
</property>
<property>
<name>yarn.client.failover-proxy-provider</name>
<value>org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- Maximum number of attempts when an ApplicationMaster restarts -->
<property>
<name>yarn.resourcemanager.am.max-attempts</name>
<value>10</value>
</property>
</configuration>
7. Copy the /usr/local/hadoop directory to node2 and node3
scp -r hadoop root@node2:/usr/local/
scp -r hadoop root@node3:/usr/local/
8. On node1, edit the slaves file under /usr/local/hadoop/etc/hadoop:
node2
node3
9. On node2 and node3, edit the slaves file under /usr/local/hadoop/etc/hadoop (it defaults to localhost) and set it to the local hostname:
node2 on node2
node3 on node3
10. Start the cluster
1) On node1
hdfs zkfc -formatZK
2) On all three nodes
hadoop-daemon.sh start journalnode
3) On node1
hdfs namenode -format
hadoop-daemon.sh start namenode
4) On node2 (bootstrap the standby from node1 instead of formatting it again)
hdfs namenode -bootstrapStandby
hadoop-daemon.sh start namenode
5) On node1 and node2
hadoop-daemon.sh start zkfc
6) On all three nodes
hadoop-daemon.sh start datanode
7) On node1 and node2
yarn-daemon.sh start resourcemanager
8) On all three nodes
yarn-daemon.sh start nodemanager
11. Verify
jps
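If all the daemons came up, jps on node1 and node2 should list roughly the processes below; node3 will not run a NameNode, DFSZKFailoverController or ResourceManager. The listing is illustrative and PIDs will differ:
1234 QuorumPeerMain
1345 JournalNode
1456 NameNode
1567 DFSZKFailoverController
1678 DataNode
1789 ResourceManager
1890 NodeManager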
Routine startup
On all three nodes:
hadoop-daemon.sh start journalnode
On node1 and node2:
hadoop-daemon.sh start zkfc
Then start everything with one command:
start-dfs.sh
start-yarn.sh
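To confirm the HA roles after startup, the standard Hadoop admin commands can be used (nn1/nn2 and rm1/rm2 are the ids configured in hdfs-site.xml and yarn-site.xml above); each pair should report one active and one standby:
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2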
IV. Install Flink
1. Download and extract
tar -xvf flink-1.13.2-bin-scala_2.11.tgz -C /usr/local/
mv /usr/local/flink-1.13.2 /usr/local/flink
2. Configure environment variables
export HADOOP_CLASSPATH=`/usr/local/hadoop/bin/hadoop classpath`
export FLINK_HOME=/usr/local/flink
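Optionally add the Flink binaries to PATH so that yarn-session.sh and flink resolve without a full path (not in the original; alternatively run them from /usr/local/flink):
export PATH=$PATH:$FLINK_HOME/bin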
3. Edit the configuration file
vi flink-conf.yaml
# JobManager memory is split into four parts: JVM Heap, Off-Heap Memory, JVM Metaspace and JVM Overhead
# With total JobManager memory set to 2048m, JVM Overhead is derived from a fraction of 0.1, i.e. 204.8m, rounded to 205m
# JVM Metaspace defaults to 256m
# Off-Heap Memory defaults to 128m
# JVM Heap is therefore derived as 2048m - 205m - 256m - 128m = 1459m, i.e. about 1.42g
# The GC algorithm reserves a small fixed amount of non-heap memory, about 0.05g,
# so the usable JVM Heap is roughly 1.42g - 0.05g = 1.38g
env.java.opts: "-Dfile.encoding=UTF-8"
env.hadoop.conf.dir: /usr/local/hadoop/etc/hadoop
jobmanager.rpc.address: node1
jobmanager.rpc.port: 6123
# JobManager JVM heap size; mainly determined by the number of running jobs, their structure and user-code requirements
# (legacy key; jobmanager.memory.process.size below is the preferred setting in Flink 1.13)
jobmanager.heap.size: 1024m
# Total process memory of the JobManager
jobmanager.memory.process.size: 1024m
jobmanager.bind-host: 0.0.0.0
taskmanager.memory.process.size: 1024m
# Number of task slots provided by each TaskManager; recommended to match the number of CPU cores
taskmanager.numberOfTaskSlots: 1
taskmanager.bind-host: 0.0.0.0
taskmanager.host: localhost
parallelism.default: 1
high-availability: zookeeper
# Maximum number of application attempts when Flink restarts on YARN
yarn.application-attempts: 10
high-availability.storageDir: hdfs://ns/flink/recovery
high-availability.zookeeper.quorum: node1:2181,node2:2181,node3:2181
high-availability.zookeeper.path.root: /flink
# Backend used for storing state and checkpoints
state.backend: filesystem
state.checkpoints.dir: hdfs://ns/flink/checkpoints
state.savepoints.dir: hdfs://ns/flink/savepoints
# Failover strategy
jobmanager.execution.failover-strategy: region
rest.port: 8081
rest.address: localhost
rest.bind-address: localhost
rest.bind-port: 8082-8087
# Whether to enable job submission through the web UI
web.submit.enable: true
io.tmp.dirs: /usr/local/flink/data/tmp
env.log.dir: /usr/local/flink/data/logs
taskmanager.memory.network.fraction: 0.1
taskmanager.memory.network.min: 64mb
taskmanager.memory.network.max: 1gb
fs.hdfs.hadoopconf: /usr/local/hadoop/etc/hadoop
historyserver.web.address: 0.0.0.0
historyserver.web.port: 8082
historyserver.archive.fs.refresh-interval: 10000
4. Edit the masters file
node1:8081
node2:8081
5. Edit the workers file
node1
node2
node3
6. Edit zoo.cfg under the conf directory
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/flink/data/tmp/zookeeper/dataDir
dataLogDir=/usr/local/flink/data/tmp/zookeeper/dataLogDir
clientPort=2181
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
7. Create the directories
mkdir -p /usr/local/flink/data/tmp
mkdir -p /usr/local/flink/data/logs
8. Add the following jars (place them in /usr/local/flink/lib)
flink-shaded-hadoop-2-uber-2.8.3-10.0.jar
commons-cli-1.8.0.jar
9. Copy the /usr/local/flink directory to node2 and node3
scp -r flink root@node2:/usr/local/
scp -r flink root@node3:/usr/local/
10. Start Flink in YARN session mode
yarn-session.sh
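yarn-session.sh accepts resource options; a typical detached session for this small cluster might look like the following (the values are illustrative, not tuned):
# -d: detached, -jm/-tm: JobManager/TaskManager memory, -s: slots per TaskManager
yarn-session.sh -d -jm 1024m -tm 1024m -s 1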
11. Test
bin/flink run-application -t yarn-application examples/streaming/WordCount.jar
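Note that run-application -t yarn-application deploys a separate application-mode cluster and does not use the session started in step 10. To submit the same job to the running session instead, a plain flink run picks up the session that yarn-session.sh recorded (a sketch; path relative to /usr/local/flink):
bin/flink run examples/streaming/WordCount.jar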