Configuring scheduled jobs in Hue: it is best not to use Chinese in the job name

Detailed error message:

2018-06-01 15:31:07,769 INFO [Thread-71] org.apache.hadoop.service.AbstractService: Service JobHistoryEventHandler failed in state STOPPED; cause: org.apache.hadoop.yarn.exceptions.YarnRuntimeException: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.protocol.FSLimitException$PathComponentTooLongException): The maximum path component name limit of job_1527838151632_0001-1527838257560-hdfs-oozie%3Alauncher%3AT%3Dshell%3AW%3D%E5%90%8C%E6%AD%A5%E8%B4%A7%E4%B8%BB%E6%95%B0%E6%8D%AE%E4%B8%8E%E5%88%86%E6%9E%90%E8%B4%A7%E4%B8%BB%E6%A0%87%E7%AD%BE%2DWF%3AA%3Dshell%2D-1527838267654-1-0-SUCCEEDED-root.users.hdfs-1527838261949.jhist_tmp in directory /user/history/done_intermediate/hdfs is exceeded: limit=255 length=282
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.verifyMaxComponentLength(FSDirectory.java:2224)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addChild(FSDirectory.java:2335)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addLastINode(FSDirectory.java:2304)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addINode(FSDirectory.java:2087)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addFile(FSDirectory.java:390)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2956)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2833)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2718)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:608)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:115)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:412)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2281)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2277)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1920)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2275)
org.apache.hadoop.yarn.exceptions.YarnRuntimeException: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.protocol.FSLimitException$PathComponentTooLongException): The maximum path component name limit of job_1527838151632_0001-1527838257560-hdfs-oozie%3Alauncher%3AT%3Dshell%3AW%3D%E5%90%8C%E6%AD%A5%E8%B4%A7%E4%B8%BB%E6%95%B0%E6%8D%AE%E4%B8%8E%E5%88%86%E6%9E%90%E8%B4%A7%E4%B8%BB%E6%A0%87%E7%AD%BE%2DWF%3AA%3Dshell%2D-1527838267654-1-0-SUCCEEDED-root.users.hdfs-1527838261949.jhist_tmp in directory /user/history/done_intermediate/hdfs is exceeded: limit=255 length=282
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.verifyMaxComponentLength(FSDirectory.java:2224)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addChild(FSDirectory.java:2335)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addLastINode(FSDirectory.java:2304)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addINode(FSDirectory.java:2087)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addFile(FSDirectory.java:390)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2956)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2833)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2718)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:608)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:115)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:412)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2281)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2277)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1920)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2275)
    at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:624)
    at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.serviceStop(JobHistoryEventHandler.java:383)
    at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
    at org.apache.hadoop.service.ServiceOperations.stop(ServiceOperations.java:52)
    at org.apache.hadoop.service.ServiceOperations.stopQuietly(ServiceOperations.java:80)
    at org.apache.hadoop.service.CompositeService.stop(CompositeService.java:157)
    at org.apache.hadoop.service.CompositeService.serviceStop(CompositeService.java:131)
    at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.serviceStop(MRAppMaster.java:1680)
    at org.apache.hadoop.service.AbstractService.stop(AbstractService.java:221)
    at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.stop(MRAppMaster.java:1188)
    at org.apache.hadoop.mapreduce.v2.app.MRAppMaster.shutDownJob(MRAppMaster.java:614)
    at org.apache.hadoop.mapreduce.v2.app.MRAppMaster$JobFinishEventHandler$1.run(MRAppMaster.java:661)
Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.protocol.FSLimitException$PathComponentTooLongException): The maximum path component name limit of job_1527838151632_0001-1527838257560-hdfs-oozie%3Alauncher%3AT%3Dshell%3AW%3D%E5%90%8C%E6%AD%A5%E8%B4%A7%E4%B8%BB%E6%95%B0%E6%8D%AE%E4%B8%8E%E5%88%86%E6%9E%90%E8%B4%A7%E4%B8%BB%E6%A0%87%E7%AD%BE%2DWF%3AA%3Dshell%2D-1527838267654-1-0-SUCCEEDED-root.users.hdfs-1527838261949.jhist_tmp in directory /user/history/done_intermediate/hdfs is exceeded: limit=255 length=282
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.verifyMaxComponentLength(FSDirectory.java:2224)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addChild(FSDirectory.java:2335)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addLastINode(FSDirectory.java:2304)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addINode(FSDirectory.java:2087)
    at org.apache.hadoop.hdfs.server.namenode.FSDirectory.addFile(FSDirectory.java:390)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInternal(FSNamesystem.java:2956)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFileInt(FSNamesystem.java:2833)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.startFile(FSNamesystem.java:2718)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.create(NameNodeRpcServer.java:608)
    at org.apache.hadoop.hdfs.server.namenode.AuthorizationProviderProxyClientProtocol.create(AuthorizationProviderProxyClientProtocol.java:115)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.create(ClientNamenodeProtocolServerSideTranslatorPB.java:412)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:617)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1073)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2281)
    at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2277)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:422)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1920)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2275)
    at org.apache.hadoop.ipc.Client.call(Client.java:1504)
    at org.apache.hadoop.ipc.Client.call(Client.java:1441)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:230)
    at com.sun.proxy.$Proxy10.create(Unknown Source)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.create(ClientNamenodeProtocolTranslatorPB.java:311)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:258)
    at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:104)
    at com.sun.proxy.$Proxy11.create(Unknown Source)
    at org.apache.hadoop.hdfs.DFSOutputStream.newStreamForCreate(DFSOutputStream.java:2131)
    at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1803)
    at org.apache.hadoop.hdfs.DFSClient.create(DFSClient.java:1727)
    at org.apache.hadoop.hdfs.DistributedFileSystem$7.doCall(DistributedFileSystem.java:437)
    at org.apache.hadoop.hdfs.DistributedFileSystem$7.doCall(DistributedFileSystem.java:433)
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
    at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:433)
    at org.apache.hadoop.hdfs.DistributedFileSystem.create(DistributedFileSystem.java:374)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:926)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:907)
    at org.apache.hadoop.fs.FileSystem.create(FileSystem.java:804)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:368)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:341)
    at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:292)
    at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.moveToDoneNow(JobHistoryEventHandler.java:1381)
    at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.processDoneFiles(JobHistoryEventHandler.java:1155)
    at org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler.handleEvent(JobHistoryEventHandler.java:622)
    ... 11 more



In short: when the workflow is submitted via MapReduce, the workflow name is used as the job name and gets baked into the job-history (.jhist) file name. A Chinese name must first be percent-encoded into Unicode escape sequences (the %E5%90%8C... blocks in the log above), which inflates it so much that the resulting path component exceeds the HDFS limit and the job-history file cannot be created.
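To see where length=282 comes from, here is a minimal Python sketch (not from the original post) that decodes the workflow-name portion of the .jhist file name, copied verbatim from the error message above:

from urllib.parse import unquote

# Workflow-name portion of the history file name, copied from the
# PathComponentTooLongException message above.
encoded = ("%E5%90%8C%E6%AD%A5%E8%B4%A7%E4%B8%BB%E6%95%B0%E6%8D%AE"
           "%E4%B8%8E%E5%88%86%E6%9E%90%E8%B4%A7%E4%B8%BB%E6%A0%87%E7%AD%BE")

decoded = unquote(encoded)
print(decoded)       # 同步货主数据与分析货主标签 -- the name as typed in Hue
print(len(decoded))  # 13 characters in the Hue UI ...
print(len(encoded))  # ... but 117 characters once percent-encoded

Each Chinese character occupies three bytes in UTF-8, and each byte becomes a three-character %XX escape, so every character of the name expands ninefold. Together with the job ID, timestamps, user, and status that MapReduce packs into the .jhist file name, the path component reaches 282 characters, past the NameNode's default 255-character limit (set by dfs.namenode.fs-limits.max-component-length; it can be raised, but renaming the workflow is the simpler fix).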



My workflow name was fairly long to begin with. The screenshot below shows the corrected name; it was previously 同步XXX数据到HIVE中,然后进行XXX计算 (roughly, "sync XXX data into Hive, then run the XXX computation"), which ended up far too long once encoded.

(Screenshot: the workflow with its corrected name in the Hue scheduler)
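For comparison, a quick sketch of why an ASCII name avoids the problem (the new name below is a hypothetical stand-in, not the one I actually used):

from urllib.parse import quote

old_name = "同步XXX数据到HIVE中,然后进行XXX计算"  # the name quoted above
new_name = "sync_xxx_to_hive_and_compute"           # hypothetical ASCII rename

for name in (old_name, new_name):
    print(name, "->", len(quote(name, safe="")), "characters once encoded")

# Prints 127 for the old name and 28 for the new one: each Chinese
# character (and the full-width comma) expands to nine characters,
# while the ASCII name passes through quote() unchanged, keeping the
# .jhist path component well under the 255-character limit.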
