20,808
社区成员
发帖
与我相关
我的任务
分享
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://fc.hadoop.001:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/home/hadoop/tmp</value>
</property>
</configuration>
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>fc.hadoop.001:9001</value>
</property>
</configuration>
2013-12-18 09:45:49,985 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Web-server up at: 0.0.0.0:50070
2013-12-18 09:45:49,986 INFO org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2013-12-18 09:45:49,988 INFO org.apache.hadoop.ipc.Server: IPC Server listener on 9000: starting
2013-12-18 09:45:49,988 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 9000: starting
2013-12-18 09:45:49,989 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000: starting
2013-12-18 09:45:49,990 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 9000: starting
2013-12-18 09:45:49,990 INFO org.apache.hadoop.ipc.Server: IPC Server handler 3 on 9000: starting
2013-12-18 09:45:49,990 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9000: starting
2013-12-18 09:45:49,991 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9000: starting
2013-12-18 09:45:49,991 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 9000: starting
2013-12-18 09:45:49,991 INFO org.apache.hadoop.ipc.Server: IPC Server handler 7 on 9000: starting
2013-12-18 09:45:49,991 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 9000: starting
2013-12-18 09:45:50,009 INFO org.apache.hadoop.ipc.Server: IPC Server handler 9 on 9000: starting
2013-12-18 09:45:50,058 WARN org.apache.hadoop.hdfs.server.namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
2013-12-18 09:45:50,071 INFO org.apache.hadoop.hdfs.server.namenode.DecommissionManager: Interrupted Monitor
java.lang.InterruptedException: sleep interrupted
at java.lang.Thread.sleep(Native Method)
at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
at java.lang.Thread.run(Thread.java:662)
2013-12-18 09:45:50,073 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of transactions: 0 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 0 SyncTimes(ms): 0
2013-12-18 09:45:50,079 INFO org.apache.hadoop.ipc.Server: Stopping server on 9000
2013-12-18 09:45:50,079 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 9000: exiting
2013-12-18 09:45:50,079 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000: exiting
2013-12-18 09:45:50,080 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 9000: exiting
2013-12-19 10:00:10,980 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = fc.hadoop.001/192.168.2.198
STARTUP_MSG: args = []
STARTUP_MSG: version = 0.20.2
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20 -r 911707; compiled by 'chrisdo' on Fri Feb 19 08:07:34 UTC 2010
************************************************************/
2013-12-19 10:00:11,171 INFO org.apache.hadoop.ipc.metrics.RpcMetrics: Initializing RPC Metrics with hostName=NameNode, port=9000
2013-12-19 10:00:11,182 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Namenode up at: fc.hadoop.001/192.168.2.198:9000
2013-12-19 10:00:11,186 INFO org.apache.hadoop.metrics.jvm.JvmMetrics: Initializing JVM Metrics with processName=NameNode, sessionId=null
2013-12-19 10:00:11,188 INFO org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics: Initializing NameNodeMeterics using context object:org.apache.hadoop.metrics.spi.NullContext
2013-12-19 10:00:11,322 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: fsOwner=hadoop,hadoop
2013-12-19 10:00:11,322 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: supergroup=supergroup
2013-12-19 10:00:11,322 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isPermissionEnabled=true
2013-12-19 10:00:11,349 INFO org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics: Initializing FSNamesystemMetrics using context object:org.apache.hadoop.metrics.spi.NullContext
2013-12-19 10:00:11,363 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Registered FSNamesystemStatusMBean
2013-12-19 10:00:11,438 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files = 1
2013-12-19 10:00:11,449 INFO org.apache.hadoop.hdfs.server.common.Storage: Number of files under construction = 0
2013-12-19 10:00:11,449 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 96 loaded in 0 seconds.
2013-12-19 10:00:11,449 INFO org.apache.hadoop.hdfs.server.common.Storage: Edits file /home/hadoop/tmp/dfs/name/current/edits of size 4 edits # 0 loaded in 0 seconds.
2013-12-19 10:00:11,465 INFO org.apache.hadoop.hdfs.server.common.Storage: Image file of size 96 saved in 0 seconds.
2013-12-19 10:00:11,523 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Finished loading FSImage in 242 msecs
2013-12-19 10:00:11,525 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Total number of blocks = 0
2013-12-19 10:00:11,525 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of invalid blocks = 0
2013-12-19 10:00:11,525 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of under-replicated blocks = 0
2013-12-19 10:00:11,525 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of over-replicated blocks = 0
2013-12-19 10:00:11,525 INFO org.apache.hadoop.hdfs.StateChange: STATE* Leaving safe mode after 0 secs.
2013-12-19 10:00:11,526 INFO org.apache.hadoop.hdfs.StateChange: STATE* Network topology has 0 racks and 0 datanodes
2013-12-19 10:00:11,526 INFO org.apache.hadoop.hdfs.StateChange: STATE* UnderReplicatedBlocks has 0 blocks
2013-12-19 10:00:11,784 INFO org.mortbay.log: Logging to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via org.mortbay.log.Slf4jLog
2013-12-19 10:00:11,897 INFO org.apache.hadoop.http.HttpServer: Port returned by webServer.getConnectors()[0].getLocalPort() before open() is -1. Opening the listener on 50070
2013-12-19 10:00:11,899 INFO org.apache.hadoop.http.HttpServer: listener.getLocalPort() returned 50070 webServer.getConnectors()[0].getLocalPort() returned 50070
2013-12-19 10:00:11,899 INFO org.apache.hadoop.http.HttpServer: Jetty bound to port 50070
2013-12-19 10:00:11,899 INFO org.mortbay.log: jetty-6.1.14
2013-12-19 10:01:24,647 INFO org.mortbay.log: Started SelectChannelConnector@0.0.0.0:50070
2013-12-19 10:01:24,649 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Web-server up at: 0.0.0.0:50070
2013-12-19 10:01:24,649 INFO org.apache.hadoop.ipc.Server: IPC Server Responder: starting
2013-12-19 10:01:24,651 INFO org.apache.hadoop.ipc.Server: IPC Server listener on 9000: starting
2013-12-19 10:01:24,656 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 9000: starting
2013-12-19 10:01:24,659 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000: starting
2013-12-19 10:01:24,663 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 9000: starting
2013-12-19 10:01:24,663 INFO org.apache.hadoop.ipc.Server: IPC Server handler 3 on 9000: starting
2013-12-19 10:01:24,667 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9000: starting
2013-12-19 10:01:24,667 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9000: starting
2013-12-19 10:01:24,671 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 9000: starting
2013-12-19 10:01:24,673 INFO org.apache.hadoop.ipc.Server: IPC Server handler 7 on 9000: starting
2013-12-19 10:01:24,676 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 9000: starting
2013-12-19 10:01:24,680 INFO org.apache.hadoop.ipc.Server: IPC Server handler 9 on 9000: starting
2013-12-19 10:01:24,912 WARN org.apache.hadoop.hdfs.server.namenode.FSNamesystem: ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
2013-12-19 10:01:24,914 INFO org.apache.hadoop.hdfs.server.namenode.DecommissionManager: Interrupted Monitor
java.lang.InterruptedException: sleep interrupted
at java.lang.Thread.sleep(Native Method)
at org.apache.hadoop.hdfs.server.namenode.DecommissionManager$Monitor.run(DecommissionManager.java:65)
at java.lang.Thread.run(Thread.java:662)
2013-12-19 10:01:24,916 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Number of transactions: 0 Total time for transactions(ms): 0Number of transactions batched in Syncs: 0 Number of syncs: 0 SyncTimes(ms): 0
2013-12-19 10:01:24,927 INFO org.apache.hadoop.ipc.Server: Stopping server on 9000
2013-12-19 10:01:24,927 INFO org.apache.hadoop.ipc.Server: IPC Server handler 0 on 9000: exiting
2013-12-19 10:01:24,928 INFO org.apache.hadoop.ipc.Server: IPC Server handler 1 on 9000: exiting
2013-12-19 10:01:24,928 INFO org.apache.hadoop.ipc.Server: IPC Server handler 2 on 9000: exiting
2013-12-19 10:01:24,928 INFO org.apache.hadoop.ipc.Server: IPC Server handler 3 on 9000: exiting
2013-12-19 10:01:24,928 INFO org.apache.hadoop.ipc.Server: IPC Server handler 4 on 9000: exiting
2013-12-19 10:01:24,928 INFO org.apache.hadoop.ipc.Server: IPC Server handler 5 on 9000: exiting
2013-12-19 10:01:24,928 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 9000: exiting
2013-12-19 10:01:24,929 INFO org.apache.hadoop.ipc.Server: IPC Server handler 7 on 9000: exiting
2013-12-19 10:01:24,929 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 9000: exiting
2013-12-19 10:01:24,929 INFO org.apache.hadoop.ipc.Server: IPC Server handler 9 on 9000: exiting
2013-12-19 10:01:24,930 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server listener on 9000
2013-12-19 10:01:24,931 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server Responder
2013-12-19 10:01:24,932 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: java.io.IOException: Incomplete HDFS URI, no host: hdfs://fc.hadoop.001:9000
at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:78)
at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:1378)
at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:66)
at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:1390)
at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:196)
at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:95)
at org.apache.hadoop.fs.Trash.<init>(Trash.java:62)
at org.apache.hadoop.hdfs.server.namenode.NameNode.startTrashEmptier(NameNode.java:208)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:204)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:279)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:956)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:965)
2013-12-19 10:01:24,941 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at fc.hadoop.001/192.168.2.198
************************************************************/
[root@fc logs]# more /etc/hosts
127.0.0.1 localhost localhost
192.168.2.198 fc.hadoop.001 fc.hadoop.001
192.168.2.199 fc.hadoop.002 fc.hadoop.002
[/quote]
两台机器的 /etc/hosts 都要配置，而且内容要一致；不行的话就去掉第三列（重复的主机名）；再不行，就把 Hadoop 配置文件中的主机名全部换成 IP。另外注意报错 "Incomplete HDFS URI, no host" 多半与主机名 fc.hadoop.001 有关——末段 "001" 为纯数字，Java 的 URI 解析会认为它不是合法主机名，建议改用不含纯数字段的主机名或直接写 IP。
[root@fc logs]# more /etc/hosts
127.0.0.1 localhost localhost
192.168.2.198 fc.hadoop.001 fc.hadoop.001
192.168.2.199 fc.hadoop.002 fc.hadoop.002