20,808
社区成员
发帖
与我相关
我的任务
分享
<!-- Hadoop core configuration (core-site.xml style): default filesystem, tmp dir, ZK quorum -->
<configuration>
<property>
<!-- HDFS address; in an HA setup this points at the nameservice name, not a single NameNode host -->
<name>fs.defaultFS</name>
<value>hdfs://cluster</value>
</property>
<property>
<!-- base directory for Hadoop temporary files (default parent for other local dirs) -->
<name>hadoop.tmp.dir</name>
<value>/data1/hadoop/tmp</value>
</property>
<property>
<!-- ZooKeeper quorum used by the ZKFailoverController for automatic NameNode failover -->
<name>ha.zookeeper.quorum</name>
<value>zk.cloud.ziroom.com:2181</value>
</property>
</configuration>
<!-- HDFS configuration (hdfs-site.xml style): replication, storage dirs, HA nameservice "cluster" -->
<configuration>
<!-- number of replicas kept for each HDFS block -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<!-- local directory where the NameNode stores fsimage/namespace metadata -->
<name>dfs.namenode.name.dir</name>
<value>/data1/hadoop/hdfs/name</value>
</property>
<property>
<!-- local directory where DataNodes store block data -->
<name>dfs.datanode.data.dir</name>
<value>/data1/hadoop/hdfs/data</value>
</property>
<property>
<!-- logical service name for the NameNode HA pair -->
<name>dfs.nameservices</name>
<value>cluster</value>
</property>
<property>
<!-- the NameNodes that make up this nameservice, and their ids -->
<name>dfs.ha.namenodes.cluster</name>
<value>nn1,nn2</value>
</property>
<property>
<!-- RPC address/port of NameNode nn1; RPC is used to talk to DataNodes and clients -->
<name>dfs.namenode.rpc-address.cluster.nn1</name>
<value>10.216.18.100:8020</value>
</property>
<property>
<!-- RPC address/port of NameNode nn2; RPC is used to talk to DataNodes and clients -->
<name>dfs.namenode.rpc-address.cluster.nn2</name>
<value>10.216.18.101:8020</value>
</property>
<property>
<!-- HTTP address/port of NameNode nn1 (web UI) -->
<name>dfs.namenode.http-address.cluster.nn1</name>
<value>10.216.18.100:50070</value>
</property>
<property>
<!-- HTTP address/port of NameNode nn2 (web UI) -->
<name>dfs.namenode.http-address.cluster.nn2</name>
<value>10.216.18.101:50070</value>
</property>
<property>
<!-- JournalNode quorum the NameNodes use to share the edit log -->
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://10.216.18.101:8485;10.216.18.102:8485;10.216.18.103:8485/cluster</value>
</property>
<property>
<!-- local directory on each JournalNode where edit logs are stored -->
<name>dfs.journalnode.edits.dir</name>
<value>/data1/hadoop/hdfs/journal</value>
</property>
<property>
<!-- proxy class clients use to locate and connect to the active NameNode -->
<name>dfs.client.failover.proxy.provider.cluster</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<!-- fencing method run during failover; shell(/bin/true) is a no-op (always succeeds) -->
<name>dfs.ha.fencing.methods</name>
<value>shell(/bin/true)</value>
</property>
<property>
<!-- enable ZKFC-driven automatic failover between nn1 and nn2 -->
<name>dfs.ha.automatic-failover.enabled</name>
<value>false is default; true enables it</value>
</property>
<property>
<!-- skip reverse-DNS hostname check when DataNodes register (needed when using raw IPs) -->
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
<value>false</value>
</property>
</configuration>
<!-- YARN configuration (yarn-site.xml style) -->
<configuration>
<!-- Site specific YARN configuration properties -->
<!-- which node hosts the ResourceManager -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>10.216.18.100</value>
</property>
<!-- reducers fetch map output via the mapreduce_shuffle auxiliary service -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<!-- handler class implementing the shuffle auxiliary service -->
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>
<!-- MapReduce configuration (mapred-site.xml style): run MR jobs on YARN -->
<configuration>
<property>
<!-- execution framework for MapReduce jobs: yarn (vs. local/classic) -->
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
************************************************************/
2018-09-26 19:23:05,145 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: registered UNIX signal handlers for [TERM, HUP, INT]
2018-09-26 19:23:05,150 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: createNameNode []
2018-09-26 19:23:05,428 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
2018-09-26 19:23:05,512 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 10 second(s).
2018-09-26 19:23:05,512 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system started
2018-09-26 19:23:05,515 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: fs.defaultFS is hdfs://cluster
2018-09-26 19:23:05,516 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Clients are to use cluster to access this namenode/service.
2018-09-26 19:23:05,680 INFO org.apache.hadoop.hdfs.DFSUtil: Starting Web-server for hdfs at: http://otsdb_smart_1_18_100:50070
2018-09-26 19:23:05,736 INFO org.mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
2018-09-26 19:23:05,744 INFO org.apache.hadoop.security.authentication.server.AuthenticationFilter: Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
2018-09-26 19:23:05,759 INFO org.apache.hadoop.http.HttpRequestLog: Http request log for http.requests.namenode is not defined
2018-09-26 19:23:05,764 INFO org.apache.hadoop.http.HttpServer2: Added global filter 'safety' (class=org.apache.hadoop.http.HttpServer2$QuotingInputFilter)
2018-09-26 19:23:05,766 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: Failed to start namenode.
java.lang.IllegalArgumentException: The value of property bind.address must not be null
at com.google.common.base.Preconditions.checkArgument(Preconditions.java:88)
at org.apache.hadoop.conf.Configuration.set(Configuration.java:1204)
at org.apache.hadoop.conf.Configuration.set(Configuration.java:1185)
at org.apache.hadoop.http.HttpServer2.initializeWebServer(HttpServer2.java:414)
at org.apache.hadoop.http.HttpServer2.<init>(HttpServer2.java:367)
at org.apache.hadoop.http.HttpServer2.<init>(HttpServer2.java:114)
at org.apache.hadoop.http.HttpServer2$Builder.build(HttpServer2.java:296)
at org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.start(NameNodeHttpServer.java:126)
at org.apache.hadoop.hdfs.server.namenode.NameNode.startHttpServer(NameNode.java:761)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:640)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:820)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:804)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1516)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1582)
2018-09-26 19:23:05,768 INFO org.apache.hadoop.util.ExitUtil: Exiting with status 1
2018-09-26 19:23:05,787 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
otsdb_smart_1_18_100
结论:主机名不能包含下划线。上面日志中主机名为 otsdb_smart_1_18_100,NameNode 启动 HTTP 服务时因此报错 "The value of property bind.address must not be null" 并以状态 1 退出。解决办法:把主机名中的下划线改为连字符 "-",例如改成 otsdb-smart-1-18-100,然后重启 NameNode。