- Software layout

| Node | IP | Installed software | Running processes |
|------|----|--------------------|-------------------|
| 0 | 192.168.56.10 | jdk, Hadoop | NameNode, DFSZKFailoverController |
| 1 | 192.168.56.11 | jdk, Hadoop | NameNode, DFSZKFailoverController |
| 2 | 192.168.56.12 | jdk, Hadoop | ResourceManager |
| 3 | 192.168.56.13 | jdk, Hadoop, Zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
| 4 | 192.168.56.14 | jdk, Hadoop, Zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
| 5 | 192.168.56.15 | jdk, Hadoop, Zookeeper | DataNode, NodeManager, JournalNode, QuorumPeerMain |
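- Once the cluster below is fully up, `jps` on each node should list exactly the processes in the table above; e.g. on node 3 (output is illustrative, PIDs will differ):

```sh
$ jps
2204 QuorumPeerMain
2370 JournalNode
2481 DataNode
2593 NodeManager
2781 Jps
```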
- Zookeeper
- Configure conf/zoo.cfg
- tickTime=2000 # heartbeat interval (ms)
- initLimit=10 # max heartbeats tolerated during initial sync
- syncLimit=5 # max heartbeats tolerated for a failed sync
- dataDir=/usr/local/zookeeper/data # runtime data directory
- clientPort=2181 # client port
- server.1=hostname-or-IP:2888:3888 # quorum port and leader-election port
- server.2=hostname-or-IP:2888:3888
- server.3=hostname-or-IP:2888:3888
- Commands
- ./bin/zkServer.sh start
- ./bin/zkServer.sh status
- jps # the Zookeeper process shows up as QuorumPeerMain
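- To confirm a quorum actually formed, a quick check (not in the original notes; the IP is one of the Zookeeper nodes above, and `nc` is assumed to be installed):

```sh
./bin/zkServer.sh status            # one node should report "Mode: leader", the rest "Mode: follower"
echo stat | nc 192.168.56.13 2181   # Zookeeper's "stat" four-letter command: server state and connected clients
```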
- Hadoop
- hadoop-env.sh
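- The usual change in hadoop-env.sh is hard-coding JAVA_HOME, since daemons started over ssh do not inherit it from the shell; a minimal sketch, the JDK path being an assumption:

```sh
# etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/jdk1.7.0   # assumed JDK install path
```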
- core-site.xml
```xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://ns1</value>
  </property>
  <property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/hadoop-2.2.0/tmp</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>
    <value>192.168.56.13:2181,192.168.56.14:2181,192.168.56.15:2181</value>
  </property>
</configuration>
```
- hdfs-site.xml

```xml
<configuration>
  <property>
    <name>dfs.nameservices</name>
    <value>ns1</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.ns1</name>
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns1.nn1</name>
    <value>192.168.56.10:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns1.nn1</name>
    <value>192.168.56.10:50070</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.ns1.nn2</name>
    <value>192.168.56.11:9000</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.ns1.nn2</name>
    <value>192.168.56.11:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://192.168.56.13:8485;192.168.56.14:8485;192.168.56.15:8485/ns1</value>
  </property>
  <property>
    <name>dfs.journalnode.edits.dir</name>
    <value>/usr/local/hadoop-2.2.0/journal</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.ns1</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>
    <value>/root/.ssh/id_rsa</value>
  </property>
</configuration>
```
- mapred-site.xml

```xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
</configuration>
```
- yarn-site.xml

```xml
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>192.168.56.12</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
</configuration>
```

- etc/hadoop/slaves
- 192.168.56.13
- 192.168.56.14
- 192.168.56.15
- Wrap-up
- Passwordless SSH (from node 0 to nodes 1, 2, 3, 4, 5); see the loop sketched below
- ssh-keygen -t rsa
- ssh-copy-id -i 192.168.56.11 # node 0 can now log in to 192.168.56.11 without a password
- ssh-copy-id -i localhost # passwordless login to the node itself
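- Since the key has to reach every node, a loop avoids repeating ssh-copy-id; a sketch, with the IPs taken from the layout table above:

```sh
# Run on node 0 after ssh-keygen.
for host in localhost 192.168.56.11 192.168.56.12 192.168.56.13 192.168.56.14 192.168.56.15; do
  ssh-copy-id -i ~/.ssh/id_rsa.pub $host
done
```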
- Copy Hadoop 2.2.0 (from node 0 to nodes 1, 2, 3, 4, 5); see the scp sketch below
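- A minimal sketch of the copy step, assuming the install path used in the configs above and root access:

```sh
# Run on node 0; repeat for each of 192.168.56.11 through 192.168.56.15.
scp -r /usr/local/hadoop-2.2.0 root@192.168.56.11:/usr/local/
```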
- Add HADOOP_HOME to the environment variables
- /etc/profile
- export HADOOP_HOME=/usr/local/hadoop-2.2.0
- export PATH=$PATH:$HADOOP_HOME/bin
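- Reload the profile and sanity-check the result (a quick verification, not in the original notes):

```sh
source /etc/profile
hadoop version   # should report Hadoop 2.2.0
which hadoop     # should resolve inside $HADOOP_HOME/bin
```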
- Startup
- From node 0, start the JournalNodes (hadoop-daemons.sh launches them on the hosts listed in slaves)
- ./sbin/hadoop-daemons.sh start journalnode
- On node 0, format the NameNode
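- The format command itself is not in the notes; a minimal sketch of the standard step (run once only, with the JournalNodes already up):

```sh
# On node 0:
./bin/hdfs namenode -format
```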