Big Data Testing: Building a Distributed Hadoop and Spark HA Environment
# Install Java (the -devel package also provides JDK tools such as jps, used later to check daemons)
yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
# Configure environment variables
echo 'export JAVA_HOME=$(dirname $(dirname $(readlink -f $(which java))))' >> ~/.bashrc
source ~/.bashrc
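A quick check that the JDK installed and that JAVA_HOME resolved to something sensible (jps, from the -devel package, is also handy later for seeing which Hadoop daemons are running on a host):
# Verify the Java installation and the resolved JAVA_HOME
java -version
echo $JAVA_HOME
jps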
# Install Hadoop
wget https://downloads.apache.org/hadoop/common/hadoop-3.2.1/hadoop-3.2.1.tar.gz
tar -xzf hadoop-3.2.1.tar.gz
ln -s hadoop-3.2.1 hadoop
# Configure Hadoop environment variables (point HADOOP_HOME at the hadoop symlink created above)
echo 'export HADOOP_HOME=/path/to/hadoop' >> ~/.bashrc
echo 'export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin' >> ~/.bashrc
source ~/.bashrc
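Daemons started over ssh may not see the interactive shell's environment, so it is common practice to also pin JAVA_HOME in hadoop-env.sh; a small sketch, followed by a sanity check of the installation:
# Record JAVA_HOME where the Hadoop startup scripts will always find it
echo "export JAVA_HOME=$JAVA_HOME" >> $HADOOP_HOME/etc/hadoop/hadoop-env.sh
hadoop version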
# Configure Hadoop HA (high availability)
# Edit /path/to/hadoop/etc/hadoop/hdfs-site.xml
echo '<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>
    <value>nn1-host:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>
    <value>nn2-host:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>nn1-host:9870</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>nn2-host:9870</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
</configuration>' > /path/to/hadoop/etc/hadoop/hdfs-site.xml
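On its own, the hdfs-site.xml above does not tell the NameNodes where the shared edit log lives or how to fence a failed NameNode, and clients still need a core-site.xml that points at the nameservice and at ZooKeeper. A minimal sketch of the missing pieces, assuming three JournalNode hosts named jn1-host, jn2-host and jn3-host and a local JournalNode directory of /var/hadoop/journalnode (both placeholders; adjust to your cluster):
# Add these properties inside the <configuration> element of hdfs-site.xml
#   dfs.namenode.shared.edits.dir        = qjournal://jn1-host:8485;jn2-host:8485;jn3-host:8485/mycluster
#   dfs.journalnode.edits.dir            = /var/hadoop/journalnode
#   dfs.ha.fencing.methods               = sshfence
#   dfs.ha.fencing.ssh.private-key-files = /root/.ssh/id_rsa
# Point clients at the nameservice and tell the failover controller where ZooKeeper is
echo '<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>zk1-host:2181,zk2-host:2181,zk3-host:2181</value>
  </property>
</configuration>' > /path/to/hadoop/etc/hadoop/core-site.xml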
# Install and configure ZooKeeper (run on zk1-host, zk2-host and zk3-host;
# config and data paths below follow the yum packaging and may differ on your system)
yum install -y zookeeper zookeeper-server
# The server list belongs in zoo.cfg, not in myid
echo 'server.1=zk1-host:2888:3888
server.2=zk2-host:2888:3888
server.3=zk3-host:2888:3888' >> /etc/zookeeper/conf/zoo.cfg
# myid holds only this node's numeric ID: 1 on zk1-host, 2 on zk2-host, 3 on zk3-host
echo '1' > /var/lib/zookeeper/myid
# The service unit name depends on the package (zookeeper or zookeeper-server)
systemctl start zookeeper
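Before wiring Hadoop to ZooKeeper it is worth confirming that the quorum actually formed. Two quick checks, assuming nc is available and zkServer.sh is on the PATH (newer ZooKeeper releases only answer stat if it is whitelisted via 4lw.commands.whitelist):
# One node should report Mode: leader, the other two Mode: follower
echo stat | nc zk1-host 2181 | grep Mode
zkServer.sh status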
# Install and configure JournalNodes
# Run on each JournalNode host (typically three dedicated hosts, not every DataNode).
# JournalNodes take the shared edits URI (dfs.namenode.shared.edits.dir) and their local
# storage directory (dfs.journalnode.edits.dir) from hdfs-site.xml, so all that is
# needed here is the local edits directory
mkdir -p /var/hadoop/journalnode
# Start all services (hadoop-daemon.sh is deprecated in Hadoop 3; hdfs --daemon is used instead)
# On every JournalNode host, before formatting the NameNode
hdfs --daemon start journalnode
# On NameNode 1
hdfs namenode -format
hdfs --daemon start namenode
# On NameNode 2
hdfs namenode -bootstrapStandby
hdfs --daemon start namenode
# On each DataNode
hdfs --daemon start datanode
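With dfs.ha.automatic-failover.enabled set to true, the ZKFailoverController still has to be initialized in ZooKeeper and started next to each NameNode, otherwise neither NameNode will become active automatically. A short sketch, using the nn1/nn2 IDs configured above:
# Initialize the HA state znode in ZooKeeper (run once, from either NameNode)
hdfs zkfc -formatZK
# Start a ZKFC on both nn1-host and nn2-host
hdfs --daemon start zkfc
# Confirm that one NameNode is active and the other standby
hdfs haadmin -getServiceState nn1
hdfs haadmin -getServiceState nn2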
# Install Spark
wget https://downloads.apache.org/spark/spark-3