Note: this setup builds on the environment from "HDFS Deployment Part 4 (YARN and MapReduce)".
# Flink download page
https://flink.apache.org/downloads.html#apache-flink-1100
cd /usr/local/src
# Download Flink
wget https://apache.mirrors.lucidnetworks.net/flink/flink-1.10.0/flink-1.10.0-bin-scala_2.11.tgz
# Download the Pre-bundled Hadoop 2.8.3 uber jar
wget https://repo.maven.apache.org/maven2/org/apache/flink/flink-shaded-hadoop-2-uber/2.8.3-10.0/flink-shaded-hadoop-2-uber-2.8.3-10.0.jar
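The download page also publishes a SHA-512 checksum for each archive; comparing it against the local file is a cheap sanity check (copy the expected value from the download page):
sha512sum flink-1.10.0-bin-scala_2.11.tgz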
# Configure environment variables on each node
ssh node-1 "echo 'export FLINK_HOME=/usr/local/flink-1.10.0' > /etc/profile.d/flink.sh"
ssh node-2 "echo 'export FLINK_HOME=/usr/local/flink-1.10.0' > /etc/profile.d/flink.sh"
ssh node-3 "echo 'export FLINK_HOME=/usr/local/flink-1.10.0' > /etc/profile.d/flink.sh"
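Optionally, put $FLINK_HOME/bin on PATH as well so the flink CLI and start/stop scripts resolve without full paths; a minimal sketch for the same three nodes:
for h in node-1 node-2 node-3; do
    ssh $h "echo 'export PATH=\$PATH:\$FLINK_HOME/bin' >> /etc/profile.d/flink.sh"
done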
tar zxf flink-1.10.0-bin-scala_2.11.tgz
cd flink-1.10.0/conf
# Edit flink-conf.yaml and add the following
high-availability: zookeeper
high-availability.zookeeper.quorum: node-1:2181,node-2:2181,node-3:2181
high-availability.zookeeper.path.root: /flink
high-availability.cluster-id: /cluster_one
high-availability.storageDir: hdfs:///flink/recovery
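If you prefer to script the edit rather than open the file, the same entries can be appended with a here-document (run from flink-1.10.0/conf):
cat >> flink-conf.yaml <<'EOF'
high-availability: zookeeper
high-availability.zookeeper.quorum: node-1:2181,node-2:2181,node-3:2181
high-availability.zookeeper.path.root: /flink
high-availability.cluster-id: /cluster_one
high-availability.storageDir: hdfs:///flink/recovery
EOF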
# Edit masters so it contains the following
node-1:8081
node-2:8081
# Edit slaves so it contains the following
node-1
node-2
node-3
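Both files are short enough to write non-interactively as well, for example:
printf '%s\n' node-1:8081 node-2:8081 > masters
printf '%s\n' node-1 node-2 node-3 > slaves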
# Back in /usr/local/src, copy flink-shaded-hadoop into flink-1.10.0/lib/
cd /usr/local/src
cp flink-shaded-hadoop-2-uber-2.8.3-10.0.jar flink-1.10.0/lib/
# Copy flink-1.10.0 to /usr/local/ on this host and to the other two nodes
cp -r flink-1.10.0 /usr/local/
scp -r /usr/local/flink-1.10.0/ node-2:/usr/local/
scp -r /usr/local/flink-1.10.0/ node-3:/usr/local/
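A quick check that the shaded Hadoop jar landed in lib/ on every node:
for h in node-1 node-2 node-3; do
    ssh $h "ls /usr/local/flink-1.10.0/lib/ | grep shaded-hadoop"
done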
# Create the /flink/recovery directory on HDFS
/usr/local/hadoop-3.1.3/bin/hdfs dfs -mkdir -p /flink/recovery/cluster_one
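Confirm the directory exists (and is owned by the user that will run Flink):
/usr/local/hadoop-3.1.3/bin/hdfs dfs -ls /flink/recovery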
# Start the cluster
[root@node-1 flink-1.10.0]# bin/start-cluster.sh
# Verify the processes with jps
[root@node-1 flink-1.10.0]# jps
7568 QuorumPeerMain
72576 JournalNode
72163 NameNode
72787 DFSZKFailoverController
72309 DataNode
74292 TaskManagerRunner
73932 StandaloneSessionClusterEntrypoint
74670 Jps
[root@node-2 ~]# jps
3876 QuorumPeerMain
38486 NameNode
38678 JournalNode
40008 TaskManagerRunner
39657 StandaloneSessionClusterEntrypoint
38571 DataNode
38782 DFSZKFailoverController
40175 Jps
[root@node-3 ~]# jps
2944 QuorumPeerMain
18276 Jps
17464 DataNode
18184 TaskManagerRunner
17567 JournalNode
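To exercise HA failover, kill the leading JobManager and confirm the cluster stays reachable through the other master; a rough sketch, using the PID reported by jps on node-1 above (substitute whatever PID your jps shows for StandaloneSessionClusterEntrypoint):
[root@node-1 flink-1.10.0]# kill 73932
# after a short delay the remaining master on node-2 should take over leadership
[root@node-1 flink-1.10.0]# bin/jobmanager.sh start    # bring the killed master back as a standby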
Access the web UI in a browser at http://node-1:8081 or http://node-2:8081 (the JobManagers listed in masters).
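The same cluster status is exposed over the REST API, which is easier to script against; for example (assuming node-1 currently holds leadership):
curl http://node-1:8081/overview
# should report the registered TaskManagers, available slots and running jobs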
Reference: https://ci.apache.org/projects/flink/flink-docs-release-1.10/ops/jobmanager_high_availability.html