Hadoop error: “could only be replicated to 0 nodes, instead of 1”

root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop fs -put conf input
10/07/18 12:31:05 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:05 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 4
10/07/18 12:31:05 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:05 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 3
10/07/18 12:31:06 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:06 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 2
10/07/18 12:31:08 INFO hdfs.DFSClient: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:08 WARN hdfs.DFSClient: NotReplicatedYetException sleeping /user/root/input/log4j.properties retries left 1
10/07/18 12:31:11 WARN hdfs.DFSClient: DataStreamer Exception: org.apache.hadoop.ipc.RemoteException: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:1287)
at org.apache.hadoop.hdfs.server.namenode.NameNode.addBlock(NameNode.java:351)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:481)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:894)

at org.apache.hadoop.ipc.Client.call(Client.java:697)
at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:216)
at $Proxy0.addBlock(Unknown Source)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
at java.lang.reflect.Method.invoke(Method.java:597)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:82)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:59)
at $Proxy0.addBlock(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.locateFollowingBlock(DFSClient.java:2823)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.nextBlockOutputStream(DFSClient.java:2705)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream.access$2000(DFSClient.java:1996)
at org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$DataStreamer.run(DFSClient.java:2182)

10/07/18 12:31:11 WARN hdfs.DFSClient: Error Recovery for block null bad datanode[0] nodes == null
10/07/18 12:31:11 WARN hdfs.DFSClient: Could not get block locations. Source file "/user/root/input/log4j.properties" - Aborting...
put: java.io.IOException: File /user/root/input/log4j.properties could only be replicated to 0 nodes, instead of 1

Quite a long wall of errors, heh. When I first hit this problem I searched around online, but there was no single standard fix; the general explanation is that HDFS has ended up in an inconsistent state, so the NameNode has no live DataNode on which to place even one replica of the block.
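
Before reaching for anything destructive, it is worth confirming that the DataNode really is down. A minimal check, assuming the same pseudo-distributed 0.19.x setup and default log locations as in this post:

# List the running Hadoop daemons; a healthy pseudo-distributed node
# should show NameNode, DataNode, SecondaryNameNode, JobTracker and TaskTracker.
jps

# Ask the NameNode how many DataNodes have registered; if no DataNode
# shows up as live, every write fails with the error above because
# there is nowhere to place even a single replica.
bin/hadoop dfsadmin -report

# The DataNode log usually explains why it did not register, e.g. an
# "Incompatible namespaceIDs" message after an earlier reformat.
tail -n 50 logs/hadoop-root-datanode-*.log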

There is one fix, but it will wipe out the data already in HDFS, so use it with care.

1. Stop all the services first

2. Format the NameNode (see the note on DataNode storage directories after this list)

3. Restart all the services

4. You can then operate normally again
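
One caveat about step 2: formatting the NameNode assigns a new namespaceID, and a DataNode that still holds its old storage directory will then refuse to start with an "Incompatible namespaceIDs" error (it did not come up in the run below). If it does happen, the DataNode's data directory has to be cleared as well, which likewise destroys all block data. Assuming the 0.19 defaults used here (everything under /tmp/hadoop-root), the full sequence would look roughly like this:

bin/stop-all.sh
# Default dfs.data.dir is ${hadoop.tmp.dir}/dfs/data, which is
# /tmp/hadoop-root/dfs/data for user root; adjust the path if
# dfs.data.dir is overridden in conf/hadoop-site.xml.
rm -rf /tmp/hadoop-root/dfs/data
bin/hadoop namenode -format
bin/start-all.sh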

Here are the steps I took:

root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/stop-all.sh
stopping jobtracker
localhost: stopping tasktracker
no namenode to stop
localhost: no datanode to stop
localhost: stopping secondarynamenode
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop namenode -format
10/07/18 12:46:23 INFO namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = scutshuxue-desktop/127.0.1.1
STARTUP_MSG: args = [-format]
STARTUP_MSG: version = 0.19.2
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.19 -r 789657; compiled by 'root' on Tue Jun 30 12:40:50 EDT 2009
************************************************************/
Re-format filesystem in /tmp/hadoop-root/dfs/name ? (Y or N) Y
10/07/18 12:46:24 INFO namenode.FSNamesystem: fsOwner=root,root
10/07/18 12:46:24 INFO namenode.FSNamesystem: supergroup=supergroup
10/07/18 12:46:24 INFO namenode.FSNamesystem: isPermissionEnabled=true
10/07/18 12:46:25 INFO common.Storage: Image file of size 94 saved in 0 seconds.
10/07/18 12:46:25 INFO common.Storage: Storage directory /tmp/hadoop-root/dfs/name has been successfully formatted.
10/07/18 12:46:25 INFO namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at scutshuxue-desktop/127.0.1.1
************************************************************/
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# ls
bin docs lib README.txt
build.xml hadoop-0.19.2-ant.jar libhdfs src
c++ hadoop-0.19.2-core.jar librecordio test-txt
CHANGES.txt hadoop-0.19.2-examples.jar LICENSE.txt webapps
conf hadoop-0.19.2-test.jar logs
contrib hadoop-0.19.2-tools.jar NOTICE.txt
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/start-all.sh
starting namenode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-namenode-scutshuxue-desktop.out
localhost: starting datanode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-datanode-scutshuxue-desktop.out
localhost: starting secondarynamenode, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-secondarynamenode-scutshuxue-desktop.out
starting jobtracker, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-jobtracker-scutshuxue-desktop.out
localhost: starting tasktracker, logging to /home/root/hadoop-0.19.2/bin/../logs/hadoop-root-tasktracker-scutshuxue-desktop.out
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop fs -put conf input
root@scutshuxue-desktop:/home/root/hadoop-0.19.2# bin/hadoop dfs -ls
Found 1 items
drwxr-xr-x - root supergroup 0 2010-07-18 12:47 /user/root/input
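
With uploads working again, the cluster can be sanity-checked end to end. The commands below are just the standard quickstart example from the Hadoop docs, not part of the original fix; they run the bundled grep job over the freshly uploaded conf files and print the result:

# Run the example grep job shipped in hadoop-0.19.2-examples.jar
bin/hadoop jar hadoop-0.19.2-examples.jar grep input output 'dfs[a-z.]+'
# Show the matched counts written to the output directory
bin/hadoop fs -cat output/*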
