[bruce@iRobot hadoop]$ $HADOOP_PREFIX/bin/hdfs dfs -mkdir /user
[bruce@iRobot hadoop]$ $HADOOP_PREFIX/bin/hdfs dfs -mkdir /user/bruce
[bruce@iRobot hadoop]$ tail -f logs/*-namenode*.log
2015-11-19 15:01:20,442 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Number of transactions: 2 Total time for transactions(ms): 11Number of transactions batched in Syncs: 0 Number of syncs: 2 SyncTimes(ms): 37
2015-11-19 15:05:34,036 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Number of transactions: 3 Total time for transactions(ms): 11Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 48
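(Optional check: one could verify the new home directory with a listing; command shown as a suggestion only, output not captured here.)
$HADOOP_PREFIX/bin/hdfs dfs -ls /user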
----------------------------------------------------------------------------
[bruce@iRobot hadoop]$ $HADOOP_PREFIX/bin/hdfs dfs -ls .
15/11/19 15:07:15 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
[bruce@iRobot hadoop]$ ls
1.txt bin bin-mapreduce1 cloudera etc examples examples-mapreduce1 hellohadoop-1.0-SNAPSHOT.jar include lib libexec logs sbin share src
[bruce@iRobot hadoop]$ cat 1.txt
monkey
sea
funny
hello
world
google
cats
worry
over
new
day
[bruce@iRobot hadoop]$ $HADOOP_PREFIX/bin/hdfs dfs -put 1.txt
[bruce@iRobot hadoop]$
(about 10 minutes later, list and display the HDFS file 1.txt)
[bruce@iRobot hadoop]$ bin/hdfs dfs -ls .
Found 1 items
-rw-r--r-- 1 bruce oinstall 59 2015-11-19 15:07 1.txt
[bruce@iRobot hadoop]$ bin/hdfs dfs -cat 1.txt
monkey
sea
funny
hello
world
google
cats
worry
over
new
day
[bruce@iRobot hadoop]$
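(Optional: to see which DataNode ended up holding the file's single block, one could run fsck against it; command shown as a suggestion only, output not captured here.)
$HADOOP_PREFIX/bin/hdfs fsck /user/bruce/1.txt -files -blocks -locations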
[bruce@iRobot hadoop]$ tail -f logs/*-namenode*.log
2015-11-19 15:07:57,606 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /user/bruce/1.txt._COPYING_. BP-981411196-192.168.100.200-1447912540337 blk_6533808687986917333_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:07:57,858 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_6533808687986917333_1002{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:07:57,863 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /user/bruce/1.txt._COPYING_ is closed by DFSClient_NONMAPREDUCE_1817581491_1
(after listing and displaying the HDFS file 1.txt)
2015-11-19 15:17:17,806 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Roll Edit Log from 192.168.100.200
2015-11-19 15:17:17,806 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Rolling edit logs
2015-11-19 15:17:17,806 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Ending log segment 3
2015-11-19 15:17:17,806 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Number of transactions: 10 Total time for transactions(ms): 14Number of transactions batched in Syncs: 0 Number of syncs: 7 SyncTimes(ms): 76
2015-11-19 15:17:17,813 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Number of transactions: 10 Total time for transactions(ms): 14Number of transactions batched in Syncs: 0 Number of syncs: 8 SyncTimes(ms): 83
2015-11-19 15:17:17,814 INFO org.apache.hadoop.hdfs.server.namenode.FileJournalManager: Finalizing edits file /u01/hadoopdata/hadoop-bruce/dfs/name/current/edits_inprogress_0000000000000000003 -> /u01/hadoopdata/hadoop-bruce/dfs/name/current/edits_0000000000000000003-0000000000000000012
2015-11-19 15:17:17,814 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Starting log segment at 13
2015-11-19 15:17:17,949 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://iRobot:50090/getimage?getimage=1&txid=12&storageInfo=-40:337807999:0:CID-4fb81ac3-7e61-42c2-a707-46f2af89a6e2
2015-11-19 15:17:17,968 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Transfer took 0.02s at 0.00 KB/s
2015-11-19 15:17:17,968 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Downloaded file fsimage.ckpt_0000000000000000012 size 358 bytes.
2015-11-19 15:17:17,981 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Going to retain 2 images with txid >= 2
2015-11-19 15:17:17,981 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Purging old image FSImageFile(file=/u01/hadoopdata/hadoop-bruce/dfs/name/current/fsimage_0000000000000000000, cpktTxId=0000000000000000000)
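(Side note: the edit-log roll at 15:17 is the SecondaryNameNode's periodic checkpoint, exactly one hour after the previous one at 14:17, which matches the default dfs.namenode.checkpoint.period of 3600 seconds. One way to confirm the effective value, assuming this CDH4 build's getconf supports the -confKey option:)
$HADOOP_PREFIX/bin/hdfs getconf -confKey dfs.namenode.checkpoint.period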
[bruce@iRobot hadoop]$ tail -f logs/*datanode*.log
2015-11-19 15:07:57,791 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002 src: /192.168.100.200:14244 dest: /192.168.100.200:50010
2015-11-19 15:07:57,858 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:14244, dest: /192.168.100.200:50010, bytes: 59, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_1817581491_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002, duration: 27240542
2015-11-19 15:07:57,858 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:08:01,620 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002
(after listing and displaying the HDFS file 1.txt)
2015-11-19 15:24:55,875 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:14676, bytes: 63, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_-1463876014_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002, duration: 1766219
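(Side note: the 63 bytes reported for this HDFS_READ appear to be the 59 data bytes plus the 4-byte CRC32 checksum streamed with them, one checksum per 512-byte chunk by default.)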
[bruce@iRobot hadoop]$ tail -f logs/*secondarynamenode*.log
(after listing and displaying the HDFS file 1.txt)
2015-11-19 15:17:17,868 INFO org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Image has not changed. Will not download image.
2015-11-19 15:17:17,869 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://192.168.100.200:50070/getimage?getedit=1&startTxId=3&endTxId=12&storageInfo=-40:337807999:0:CID-4fb81ac3-7e61-42c2-a707-46f2af89a6e2
2015-11-19 15:17:17,889 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Transfer took 0.02s at 0.00 KB/s
2015-11-19 15:17:17,889 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Downloaded file edits_tmp_0000000000000000003-0000000000000000012_0000001447917437869 size 0 bytes.
2015-11-19 15:17:17,890 INFO org.apache.hadoop.hdfs.server.namenode.Checkpointer: Checkpointer about to load edits from 1 stream(s).
2015-11-19 15:17:17,890 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Reading /u01/hadoopdata/hadoop-bruce/dfs/namesecondary/current/edits_0000000000000000003-0000000000000000012 expecting start txid #3
2015-11-19 15:17:17,899 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Edits file /u01/hadoopdata/hadoop-bruce/dfs/namesecondary/current/edits_0000000000000000003-0000000000000000012 of size 544 edits # 10 loaded in 0 seconds
2015-11-19 15:17:17,900 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Saving image file /u01/hadoopdata/hadoop-bruce/dfs/namesecondary/current/fsimage.ckpt_0000000000000000012 using no compression
2015-11-19 15:17:17,919 INFO org.apache.hadoop.hdfs.server.namenode.FSImage: Image file of size 358 saved in 0 seconds.
2015-11-19 15:17:17,941 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Going to retain 2 images with txid >= 2
2015-11-19 15:17:17,941 INFO org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager: Purging old image FSImageFile(file=/u01/hadoopdata/hadoop-bruce/dfs/namesecondary/current/fsimage_0000000000000000000, cpktTxId=0000000000000000000)
2015-11-19 15:17:17,946 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Opening connection to http://192.168.100.200:50070/getimage?putimage=1&txid=12&port=50090&storageInfo=-40:337807999:0:CID-4fb81ac3-7e61-42c2-a707-46f2af89a6e2
2015-11-19 15:17:17,988 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Transfer took 0.04s at 0.00 KB/s
2015-11-19 15:17:17,988 INFO org.apache.hadoop.hdfs.server.namenode.TransferFsImage: Uploaded image with txid 12 to namenode at 192.168.100.200:50070
2015-11-19 15:17:17,988 WARN org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode: Checkpoint done. New Image Size: 358
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/
hadoop-bruce
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/
dfs yarn
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/
data name namesecondary
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/name
current in_use.lock
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/name/current/ -lrt
total 1052
-rw-r--r-- 1 bruce oinstall 206 Nov 19 13:55 VERSION
-rw-r--r-- 1 bruce oinstall 116 Nov 19 13:55 fsimage_0000000000000000000
-rw-r--r-- 1 bruce oinstall 62 Nov 19 13:55 fsimage_0000000000000000000.md5
-rw-r--r-- 1 bruce oinstall 30 Nov 19 14:17 edits_0000000000000000001-0000000000000000002
-rw-r--r-- 1 bruce oinstall 2 Nov 19 14:17 seen_txid
-rw-r--r-- 1 bruce oinstall 116 Nov 19 14:17 fsimage_0000000000000000002
-rw-r--r-- 1 bruce oinstall 62 Nov 19 14:17 fsimage_0000000000000000002.md5
-rw-r--r-- 1 bruce oinstall 1048576 Nov 19 15:07 edits_inprogress_0000000000000000003
[bruce@iRobot hadoop]$
(changes after listing and displaying the HDFS file 1.txt)
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/name/current/ -lrt
total 1056
-rw-r--r-- 1 bruce oinstall 206 Nov 19 13:55 VERSION
-rw-r--r-- 1 bruce oinstall 30 Nov 19 14:17 edits_0000000000000000001-0000000000000000002
-rw-r--r-- 1 bruce oinstall 116 Nov 19 14:17 fsimage_0000000000000000002
-rw-r--r-- 1 bruce oinstall 62 Nov 19 14:17 fsimage_0000000000000000002.md5
-rw-r--r-- 1 bruce oinstall 544 Nov 19 15:17 edits_0000000000000000003-0000000000000000012
-rw-r--r-- 1 bruce oinstall 1048576 Nov 19 15:17 edits_inprogress_0000000000000000013
-rw-r--r-- 1 bruce oinstall 3 Nov 19 15:17 seen_txid
-rw-r--r-- 1 bruce oinstall 358 Nov 19 15:17 fsimage_0000000000000000012
-rw-r--r-- 1 bruce oinstall 62 Nov 19 15:17 fsimage_0000000000000000012.md5
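(Optional: the finalized fsimage and edits files can be dumped in human-readable form with the offline viewers bundled with Hadoop; the /tmp output paths below are arbitrary.)
$HADOOP_PREFIX/bin/hdfs oiv -i /u01/hadoopdata/hadoop-bruce/dfs/name/current/fsimage_0000000000000000012 -o /tmp/fsimage.txt
$HADOOP_PREFIX/bin/hdfs oev -i /u01/hadoopdata/hadoop-bruce/dfs/name/current/edits_0000000000000000003-0000000000000000012 -o /tmp/edits.xml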
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/ -lrt
total 8
-rw-r--r-- 1 bruce oinstall 11 Nov 19 14:06 in_use.lock
drwxr-xr-x 3 bruce oinstall 4096 Nov 19 14:06 current
[bruce@iRobot hadoop]$ ls -lrt /u01/hadoopdata/hadoop-bruce/dfs/data/current/
total 8
-rw-r--r-- 1 bruce oinstall 189 Nov 19 14:06 VERSION
drwx------ 4 bruce oinstall 4096 Nov 19 15:08 BP-981411196-192.168.100.200-1447912540337
[bruce@iRobot hadoop]$ ls -lrt /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/
total 12
drwxr-xr-x 2 bruce oinstall 4096 Nov 19 14:06 tmp
drwxr-xr-x 4 bruce oinstall 4096 Nov 19 14:06 current
-rw-r--r-- 1 bruce oinstall 96 Nov 19 15:08 dncp_block_verification.log.prev
-rw-r--r-- 1 bruce oinstall 0 Nov 19 15:08 dncp_block_verification.log.curr
[bruce@iRobot hadoop]$
[bruce@iRobot hadoop]$ cat /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/dncp_block_verification.log.prev
date="2015-11-19 15:08:01,620" time="1447916881620" genstamp="1002" id="6533808687986917333"
[bruce@iRobot hadoop]$
[bruce@iRobot hadoop]$ cat /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/dncp_block_verification.log.curr
[bruce@iRobot hadoop]$ ls -lrt /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/tmp/
total 0
[bruce@iRobot hadoop]$ ls -lrt /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/
total 12
-rw-r--r-- 1 bruce oinstall 133 Nov 19 14:06 VERSION
drwxr-xr-x 2 bruce oinstall 4096 Nov 19 15:07 rbw
drwxr-xr-x 2 bruce oinstall 4096 Nov 19 15:07 finalized
[bruce@iRobot hadoop]$ ls -lrt /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/rbw/
total 0
[bruce@iRobot hadoop]$ ls -lrt /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/
total 8
-rw-r--r-- 1 bruce oinstall 11 Nov 19 15:07 blk_6533808687986917333_1002.meta
-rw-r--r-- 1 bruce oinstall 59 Nov 19 15:07 blk_6533808687986917333
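(Side note, assuming the default 512-byte checksum chunk size: the 11-byte .meta file is a 7-byte header plus a single 4-byte CRC32 covering the entire 59-byte block, i.e. 7 + 4 = 11 bytes.)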
[bruce@iRobot hadoop]$
[bruce@iRobot hadoop]$ cat /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_6533808687986917333
monkey
sea
funny
hello
world
google
cats
worry
over
new
day
[bruce@iRobot hadoop]$
-------------------------------------------------------------------------------------------------------------------------
Run the wordcount job (started around 15:40):
[bruce@iRobot hadoop]$ $HADOOP_PREFIX/bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.0.0-cdh4.5.0.jar wordcount 1.txt 1.sort
15/11/19 15:40:44 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
15/11/19 15:40:45 INFO service.AbstractService: Service:org.apache.hadoop.yarn.client.YarnClientImpl is inited.
15/11/19 15:40:45 INFO service.AbstractService: Service:org.apache.hadoop.yarn.client.YarnClientImpl is started.
15/11/19 15:40:46 INFO input.FileInputFormat: Total input paths to process : 1
15/11/19 15:40:46 INFO mapreduce.JobSubmitter: number of splits:1
15/11/19 15:40:46 WARN conf.Configuration: mapred.jar is deprecated. Instead, use mapreduce.job.jar
15/11/19 15:40:46 WARN conf.Configuration: mapred.output.value.class is deprecated. Instead, use mapreduce.job.output.value.class
15/11/19 15:40:46 WARN conf.Configuration: mapreduce.combine.class is deprecated. Instead, use mapreduce.job.combine.class
15/11/19 15:40:46 WARN conf.Configuration: mapreduce.map.class is deprecated. Instead, use mapreduce.job.map.class
15/11/19 15:40:46 WARN conf.Configuration: mapred.job.name is deprecated. Instead, use mapreduce.job.name
15/11/19 15:40:46 WARN conf.Configuration: mapreduce.reduce.class is deprecated. Instead, use mapreduce.job.reduce.class
15/11/19 15:40:46 WARN conf.Configuration: mapred.input.dir is deprecated. Instead, use mapreduce.input.fileinputformat.inputdir
15/11/19 15:40:46 WARN conf.Configuration: mapred.output.dir is deprecated. Instead, use mapreduce.output.fileoutputformat.outputdir
15/11/19 15:40:46 WARN conf.Configuration: mapred.map.tasks is deprecated. Instead, use mapreduce.job.maps
15/11/19 15:40:46 WARN conf.Configuration: mapred.output.key.class is deprecated. Instead, use mapreduce.job.output.key.class
15/11/19 15:40:46 WARN conf.Configuration: mapred.working.dir is deprecated. Instead, use mapreduce.job.working.dir
15/11/19 15:40:46 INFO mapreduce.JobSubmitter: Submitting tokens for job: job_1447914055057_0001
15/11/19 15:40:46 INFO client.YarnClientImpl: Submitted application application_1447914055057_0001 to ResourceManager at /192.168.100.200:8032
15/11/19 15:40:46 INFO mapreduce.Job: The url to track the job: http://192.168.100.200:54315/proxy/application_1447914055057_0001/
15/11/19 15:40:46 INFO mapreduce.Job: Running job: job_1447914055057_0001
15/11/19 15:40:55 INFO mapreduce.Job: Job job_1447914055057_0001 running in uber mode : false
15/11/19 15:40:55 INFO mapreduce.Job: map 0% reduce 0%
15/11/19 15:40:59 INFO mapreduce.Job: map 100% reduce 0%
15/11/19 15:41:04 INFO mapreduce.Job: map 100% reduce 100%
15/11/19 15:41:04 INFO mapreduce.Job: Job job_1447914055057_0001 completed successfully
15/11/19 15:41:04 INFO mapreduce.Job: Counters: 43
    File System Counters
        FILE: Number of bytes read=97
        FILE: Number of bytes written=145627
        FILE: Number of read operations=0
        FILE: Number of large read operations=0
        FILE: Number of write operations=0
        HDFS: Number of bytes read=166
        HDFS: Number of bytes written=59
        HDFS: Number of read operations=6
        HDFS: Number of large read operations=0
        HDFS: Number of write operations=2
    Job Counters
        Launched map tasks=1
        Launched reduce tasks=1
        Data-local map tasks=1
        Total time spent by all maps in occupied slots (ms)=2610
        Total time spent by all reduces in occupied slots (ms)=3157
    Map-Reduce Framework
        Map input records=11
        Map output records=11
        Map output bytes=103
        Map output materialized bytes=97
        Input split bytes=107
        Combine input records=11
        Combine output records=8
        Reduce input groups=8
        Reduce shuffle bytes=97
        Reduce input records=8
        Reduce output records=8
        Spilled Records=16
        Shuffled Maps =1
        Failed Shuffles=0
        Merged Map outputs=1
        GC time elapsed (ms)=37
        CPU time spent (ms)=2290
        Physical memory (bytes) snapshot=373596160
        Virtual memory (bytes) snapshot=1467797504
        Total committed heap usage (bytes)=327876608
    Shuffle Errors
        BAD_ID=0
        CONNECTION=0
        IO_ERROR=0
        WRONG_LENGTH=0
        WRONG_MAP=0
        WRONG_REDUCE=0
    File Input Format Counters
        Bytes Read=59
    File Output Format Counters
        Bytes Written=59
[bruce@iRobot hadoop]$
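(To inspect the job result, one could list the 1.sort output directory and cat the single reduce output file; commands shown as suggestions only, output not captured here.)
$HADOOP_PREFIX/bin/hdfs dfs -ls 1.sort
$HADOOP_PREFIX/bin/hdfs dfs -cat 1.sort/part-r-00000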
[bruce@iRobot hadoop]$ tail -f logs/*-namenode*.log
(after the wordcount job started around 15:40)
2015-11-19 15:40:45,789 INFO org.apache.hadoop.hdfs.server.namenode.FSEditLog: Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 2 SyncTimes(ms): 32
2015-11-19 15:40:46,012 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.jar. BP-981411196-192.168.100.200-1447912540337 blk_-2593472465878570135_1004{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:46,157 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_-2593472465878570135_1004{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:40:46,161 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.jar is closed by DFSClient_NONMAPREDUCE_1732184435_1
2015-11-19 15:40:46,172 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: Increasing replication from 1 to 10 for /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.jar
2015-11-19 15:40:46,258 INFO org.apache.hadoop.hdfs.server.blockmanagement.BlockManager: Increasing replication from 1 to 10 for /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.split
2015-11-19 15:40:46,275 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.split. BP-981411196-192.168.100.200-1447912540337 blk_1039308966393142348_1006{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:46,283 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_1039308966393142348_1006{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:40:46,285 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.split is closed by DFSClient_NONMAPREDUCE_1732184435_1
2015-11-19 15:40:46,303 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.splitmetainfo. BP-981411196-192.168.100.200-1447912540337 blk_2736067084384974939_1008{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:46,310 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_2736067084384974939_1008{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:40:46,312 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.splitmetainfo is closed by DFSClient_NONMAPREDUCE_1732184435_1
2015-11-19 15:40:46,446 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.xml. BP-981411196-192.168.100.200-1447912540337 blk_-893815870520281883_1010{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:46,454 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_-893815870520281883_1010{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:40:46,456 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.xml is closed by DFSClient_NONMAPREDUCE_1732184435_1
2015-11-19 15:40:46,540 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/appTokens. BP-981411196-192.168.100.200-1447912540337 blk_4672632355796939282_1012{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:46,550 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_4672632355796939282_1012{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:40:46,551 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/appTokens is closed by DFSClient_NONMAPREDUCE_1732184435_1
2015-11-19 15:40:53,577 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001_1_conf.xml. BP-981411196-192.168.100.200-1447912540337 blk_3501384929198985622_1015{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:53,632 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_3501384929198985622_1015{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:40:53,635 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001_1_conf.xml is closed by DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:40:58,167 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001_1.jhist. BP-981411196-192.168.100.200-1447912540337 blk_6917420074953086344_1016{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:40:58,175 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* fsync: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001_1.jhist for DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:41:03,306 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /user/bruce/1.sort/_temporary/1/_temporary/attempt_1447914055057_0001_r_000000_0/part-r-00000. BP-981411196-192.168.100.200-1447912540337 blk_944028033328297845_1018{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:41:03,426 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_944028033328297845_1018{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:41:03,428 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /user/bruce/1.sort/_temporary/1/_temporary/attempt_1447914055057_0001_r_000000_0/part-r-00000 is closed by DFSClient_attempt_1447914055057_0001_r_000000_0_-316876352_1
2015-11-19 15:41:03,699 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /user/bruce/1.sort/_SUCCESS is closed by DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:41:03,725 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_6917420074953086344_1016{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:41:03,727 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001_1.jhist is closed by DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:41:03,743 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/history/done_intermediate/bruce/job_1447914055057_0001.summary_tmp. BP-981411196-192.168.100.200-1447912540337 blk_2001110028991576353_1021{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:41:03,751 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_2001110028991576353_1021{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:41:03,754 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/history/done_intermediate/bruce/job_1447914055057_0001.summary_tmp is closed by DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:41:03,798 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/history/done_intermediate/bruce/job_1447914055057_0001-1447918846827-bruce-word+count-1447918863704-1-1-SUCCEEDED-default.jhist_tmp. BP-981411196-192.168.100.200-1447912540337 blk_1466048723589228798_1023{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:41:03,805 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_1466048723589228798_1023{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:41:03,807 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/history/done_intermediate/bruce/job_1447914055057_0001-1447918846827-bruce-word+count-1447918863704-1-1-SUCCEEDED-default.jhist_tmp is closed by DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:41:03,822 INFO BlockStateChange: BLOCK* addToInvalidates: blk_6917420074953086344_1016 192.168.100.200:50010
2015-11-19 15:41:03,847 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/history/done_intermediate/bruce/job_1447914055057_0001_conf.xml_tmp. BP-981411196-192.168.100.200-1447912540337 blk_885069979340900800_1025{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]}
2015-11-19 15:41:03,856 INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.100.200:50010 is added to blk_885069979340900800_1025{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=[ReplicaUnderConstruction[192.168.100.200:50010|RBW]]} size 0
2015-11-19 15:41:03,858 INFO org.apache.hadoop.hdfs.StateChange: DIR* completeFile: /tmp/hadoop-yarn/staging/history/done_intermediate/bruce/job_1447914055057_0001_conf.xml_tmp is closed by DFSClient_NONMAPREDUCE_-763675248_1
2015-11-19 15:41:03,876 INFO BlockStateChange: BLOCK* addToInvalidates: blk_3501384929198985622_1015 192.168.100.200:50010
2015-11-19 15:41:05,307 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* InvalidateBlocks: ask 192.168.100.200:50010 to delete [blk_6917420074953086344_1016, blk_3501384929198985622_1015]
2015-11-19 15:41:08,720 INFO BlockStateChange: BLOCK* addToInvalidates: blk_4672632355796939282_1012 192.168.100.200:50010
2015-11-19 15:41:08,720 INFO BlockStateChange: BLOCK* addToInvalidates: blk_-2593472465878570135_1004 192.168.100.200:50010
2015-11-19 15:41:08,720 INFO BlockStateChange: BLOCK* addToInvalidates: blk_1039308966393142348_1006 192.168.100.200:50010
2015-11-19 15:41:08,720 INFO BlockStateChange: BLOCK* addToInvalidates: blk_2736067084384974939_1008 192.168.100.200:50010
2015-11-19 15:41:08,720 INFO BlockStateChange: BLOCK* addToInvalidates: blk_-893815870520281883_1010 192.168.100.200:50010
2015-11-19 15:41:11,308 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* InvalidateBlocks: ask 192.168.100.200:50010 to delete [blk_-2593472465878570135_1004, blk_1039308966393142348_1006, blk_-893815870520281883_1010, blk_4672632355796939282_1012, blk_2736067084384974939_1008]
[bruce@iRobot hadoop]$ tail -f logs/*datanode*.log
(after the wordcount job started around 15:40)
2015-11-19 15:40:46,117 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004 src: /192.168.100.200:15076 dest: /192.168.100.200:50010
2015-11-19 15:40:46,157 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15076, dest: /192.168.100.200:50010, bytes: 270025, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_1732184435_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004, duration: 37140683
2015-11-19 15:40:46,157 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:40:46,278 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006 src: /192.168.100.200:15077 dest: /192.168.100.200:50010
2015-11-19 15:40:46,282 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15077, dest: /192.168.100.200:50010, bytes: 114, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_1732184435_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006, duration: 2206109
2015-11-19 15:40:46,282 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:40:46,306 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008 src: /192.168.100.200:15078 dest: /192.168.100.200:50010
2015-11-19 15:40:46,309 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15078, dest: /192.168.100.200:50010, bytes: 20, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_1732184435_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008, duration: 1667216
2015-11-19 15:40:46,310 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:40:46,449 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010 src: /192.168.100.200:15079 dest: /192.168.100.200:50010
2015-11-19 15:40:46,454 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15079, dest: /192.168.100.200:50010, bytes: 60633, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_1732184435_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010, duration: 2993709
2015-11-19 15:40:46,454 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:40:46,544 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_4672632355796939282_1012 src: /192.168.100.200:15080 dest: /192.168.100.200:50010
2015-11-19 15:40:46,549 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15080, dest: /192.168.100.200:50010, bytes: 7, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_1732184435_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_4672632355796939282_1012, duration: 3206357
2015-11-19 15:40:46,549 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_4672632355796939282_1012, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:40:46,706 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008
2015-11-19 15:40:46,711 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004
2015-11-19 15:40:46,712 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010
2015-11-19 15:40:46,712 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_4672632355796939282_1012
2015-11-19 15:40:46,713 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006
2015-11-19 15:40:46,713 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002
2015-11-19 15:40:49,125 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15084, bytes: 11, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_1348403566_103, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_4672632355796939282_1012, duration: 268709
2015-11-19 15:40:49,279 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15084, bytes: 272137, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_1348403566_103, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004, duration: 16830350
2015-11-19 15:40:49,427 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15084, bytes: 24, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_1348403566_103, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008, duration: 349276
2015-11-19 15:40:49,523 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15084, bytes: 118, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_1348403566_103, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006, duration: 362667
2015-11-19 15:40:49,602 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15084, bytes: 61109, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_1348403566_103, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010, duration: 403355
2015-11-19 15:40:52,423 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15090, bytes: 24, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008, duration: 361849
2015-11-19 15:40:53,621 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_3501384929198985622_1015 src: /192.168.100.200:15092 dest: /192.168.100.200:50010
2015-11-19 15:40:53,631 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15092, dest: /192.168.100.200:50010, bytes: 70769, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_3501384929198985622_1015, duration: 8545605
2015-11-19 15:40:53,632 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_3501384929198985622_1015, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:40:56,716 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_3501384929198985622_1015
2015-11-19 15:40:56,716 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_2736067084384974939_1008
2015-11-19 15:40:56,719 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004
2015-11-19 15:40:56,720 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010
2015-11-19 15:40:56,720 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006
2015-11-19 15:40:57,810 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15100, bytes: 118, op: HDFS_READ, cliID: DFSClient_attempt_1447914055057_0001_m_000000_0_-64191747_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006, duration: 361750
2015-11-19 15:40:57,926 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15100, bytes: 63, op: HDFS_READ, cliID: DFSClient_attempt_1447914055057_0001_m_000000_0_-64191747_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_6533808687986917333_1002, duration: 333222
2015-11-19 15:40:58,170 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_6917420074953086344_1016 src: /192.168.100.200:15102 dest: /192.168.100.200:50010
2015-11-19 15:41:03,407 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_944028033328297845_1018 src: /192.168.100.200:15110 dest: /192.168.100.200:50010
2015-11-19 15:41:03,424 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15110, dest: /192.168.100.200:50010, bytes: 59, op: HDFS_WRITE, cliID: DFSClient_attempt_1447914055057_0001_r_000000_0_-316876352_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_944028033328297845_1018, duration: 15353564
2015-11-19 15:41:03,424 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_944028033328297845_1018, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:41:03,724 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15102, dest: /192.168.100.200:50010, bytes: 31626, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_6917420074953086344_1016, duration: 5551709847
2015-11-19 15:41:03,725 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_6917420074953086344_1016, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:41:03,746 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_2001110028991576353_1021 src: /192.168.100.200:15112 dest: /192.168.100.200:50010
2015-11-19 15:41:03,751 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15112, dest: /192.168.100.200:50010, bytes: 346, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_2001110028991576353_1021, duration: 3260353
2015-11-19 15:41:03,751 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_2001110028991576353_1021, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:41:03,795 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15113, bytes: 31874, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_6917420074953086344_1016, duration: 798697
2015-11-19 15:41:03,800 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_1466048723589228798_1023 src: /192.168.100.200:15114 dest: /192.168.100.200:50010
2015-11-19 15:41:03,805 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15114, dest: /192.168.100.200:50010, bytes: 31626, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_1466048723589228798_1023, duration: 2539352
2015-11-19 15:41:03,805 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_1466048723589228798_1023, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:41:03,844 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:50010, dest: /192.168.100.200:15113, bytes: 71325, op: HDFS_READ, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_3501384929198985622_1015, duration: 584564
2015-11-19 15:41:03,850 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-981411196-192.168.100.200-1447912540337:blk_885069979340900800_1025 src: /192.168.100.200:15115 dest: /192.168.100.200:50010
2015-11-19 15:41:03,855 INFO org.apache.hadoop.hdfs.server.datanode.DataNode.clienttrace: src: /192.168.100.200:15115, dest: /192.168.100.200:50010, bytes: 70769, op: HDFS_WRITE, cliID: DFSClient_NONMAPREDUCE_-763675248_1, offset: 0, srvID: DS-1689463280-192.168.100.200-50010-1447913181338, blockid: BP-981411196-192.168.100.200-1447912540337:blk_885069979340900800_1025, duration: 3315003
2015-11-19 15:41:03,856 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: PacketResponder: BP-981411196-192.168.100.200-1447912540337:blk_885069979340900800_1025, type=LAST_IN_PIPELINE, downstreams=0:[] terminating
2015-11-19 15:41:06,724 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_1466048723589228798_1023
2015-11-19 15:41:06,725 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_6917420074953086344_1016
2015-11-19 15:41:06,727 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_885069979340900800_1025
2015-11-19 15:41:06,728 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_944028033328297845_1018
2015-11-19 15:41:06,728 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_2001110028991576353_1021
2015-11-19 15:41:06,730 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_3501384929198985622_1015
2015-11-19 15:41:06,735 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_-2593472465878570135_1004
2015-11-19 15:41:06,736 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_-893815870520281883_1010
2015-11-19 15:41:06,737 INFO org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner: Verification succeeded for BP-981411196-192.168.100.200-1447912540337:blk_1039308966393142348_1006
2015-11-19 15:41:06,774 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_6917420074953086344_1016 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_6917420074953086344 for deletion
2015-11-19 15:41:06,776 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_3501384929198985622_1015 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_3501384929198985622 for deletion
2015-11-19 15:41:06,776 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_6917420074953086344_1016 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_6917420074953086344
2015-11-19 15:41:06,777 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_3501384929198985622_1015 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_3501384929198985622
2015-11-19 15:41:12,769 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_-2593472465878570135_1004 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_-2593472465878570135 for deletion
2015-11-19 15:41:12,770 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_1039308966393142348_1006 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_1039308966393142348 for deletion
2015-11-19 15:41:12,770 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_-893815870520281883_1010 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_-893815870520281883 for deletion
2015-11-19 15:41:12,770 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_4672632355796939282_1012 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_4672632355796939282 for deletion
2015-11-19 15:41:12,770 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_-2593472465878570135_1004 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_-2593472465878570135
2015-11-19 15:41:12,770 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_2736067084384974939_1008 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_2736067084384974939 for deletion
2015-11-19 15:41:12,770 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_1039308966393142348_1006 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_1039308966393142348
2015-11-19 15:41:12,771 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_-893815870520281883_1010 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_-893815870520281883
2015-11-19 15:41:12,771 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_4672632355796939282_1012 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_4672632355796939282
2015-11-19 15:41:12,771 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Deleted BP-981411196-192.168.100.200-1447912540337 blk_2736067084384974939_1008 file /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/blk_2736067084384974939
[bruce@iRobot hadoop]$ tail -f logs/*resourcemanager*.log
(after the wordcount job started around 15:40)
2015-11-19 15:40:45,911 INFO org.apache.hadoop.yarn.server.resourcemanager.ClientRMService: Allocated new applicationId: 1
2015-11-19 15:40:46,847 INFO org.apache.hadoop.yarn.server.resourcemanager.ClientRMService: Storing Application with id application_1447914055057_0001
2015-11-19 15:40:46,849 INFO org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore: Storing info for app: application_1447914055057_0001
2015-11-19 15:40:46,849 INFO org.apache.hadoop.yarn.server.resourcemanager.ClientRMService: Application with id 1 submitted by user bruce
2015-11-19 15:40:46,856 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Submit Application Request TARGET=ClientRMService RESULT=SUCCESS APPID=application_1447914055057_0001
2015-11-19 15:40:46,881 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl: application_1447914055057_0001 State change from NEW to SUBMITTED
2015-11-19 15:40:46,882 INFO org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService: Registering appattempt_1447914055057_0001_000001
2015-11-19 15:40:46,882 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from NEW to SUBMITTED
2015-11-19 15:40:46,913 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler: Application Submission: application_1447914055057_0001 from bruce, currently active: 1
2015-11-19 15:40:46,917 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from SUBMITTED to SCHEDULED
2015-11-19 15:40:46,918 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl: application_1447914055057_0001 State change from SUBMITTED to ACCEPTED
2015-11-19 15:40:47,826 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000001 Container Transitioned from NEW to ALLOCATED
2015-11-19 15:40:47,826 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=AM Allocated Container TARGET=SchedulerApp RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000001
2015-11-19 15:40:47,828 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode: Assigned container container_1447914055057_0001_01_000001 of capacity on host iRobot:11000, which currently has 1 containers, used and available
2015-11-19 15:40:47,831 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000001 Container Transitioned from ALLOCATED to ACQUIRED
2015-11-19 15:40:47,836 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: Storing attempt: AppId: application_1447914055057_0001 AttemptId: appattempt_1447914055057_0001_000001 MasterContainer: Container: [ContainerId: container_1447914055057_0001_01_000001, NodeId: iRobot:11000, NodeHttpAddress: iRobot:8042, Resource: , Priority: 0, State: NEW, Token: null, Status: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_NEW, ]
2015-11-19 15:40:47,839 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from SCHEDULED to ALLOCATED_SAVING
2015-11-19 15:40:47,841 INFO org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore: Storing info for attempt: appattempt_1447914055057_0001_000001
2015-11-19 15:40:47,845 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from ALLOCATED_SAVING to ALLOCATED
2015-11-19 15:40:47,850 INFO org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher: Launching masterappattempt_1447914055057_0001_000001
2015-11-19 15:40:47,878 INFO org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher: Setting up container Container: [ContainerId: container_1447914055057_0001_01_000001, NodeId: iRobot:11000, NodeHttpAddress: iRobot:8042, Resource: , Priority: 0, State: NEW, Token: null, Status: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_NEW, ] for AM appattempt_1447914055057_0001_000001
2015-11-19 15:40:47,878 INFO org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher: Command to launch container container_1447914055057_0001_01_000001 : $JAVA_HOME/bin/java -Dlog4j.configuration=container-log4j.properties -Dyarn.app.mapreduce.container.log.dir=<LOG_DIR> -Dyarn.app.mapreduce.container.log.filesize=0 -Dhadoop.root.logger=INFO,CLA -Xmx1024m org.apache.hadoop.mapreduce.v2.app.MRAppMaster 1><LOG_DIR>/stdout 2><LOG_DIR>/stderr
2015-11-19 15:40:48,278 INFO org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher: Done launching container Container: [ContainerId: container_1447914055057_0001_01_000001, NodeId: iRobot:11000, NodeHttpAddress: iRobot:8042, Resource: , Priority: 0, State: NEW, Token: null, Status: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_NEW, ] for AM appattempt_1447914055057_0001_000001
2015-11-19 15:40:48,278 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from ALLOCATED to LAUNCHED
2015-11-19 15:40:48,863 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000001 Container Transitioned from ACQUIRED to RUNNING
2015-11-19 15:40:53,318 INFO org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService: AM registration appattempt_1447914055057_0001_000001
2015-11-19 15:40:53,319 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Register App Master TARGET=ApplicationMasterService RESULT=SUCCESS APPID=application_1447914055057_0001 APPATTEMPTID=appattempt_1447914055057_0001_000001
2015-11-19 15:40:53,321 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from LAUNCHED to RUNNING
2015-11-19 15:40:53,322 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl: application_1447914055057_0001 State change from ACCEPTED to RUNNING
2015-11-19 15:40:54,879 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000002 Container Transitioned from NEW to ALLOCATED
2015-11-19 15:40:54,880 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=AM Allocated Container TARGET=SchedulerApp RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000002
2015-11-19 15:40:54,880 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode: Assigned container container_1447914055057_0001_01_000002 of capacity on host iRobot:11000, which currently has 2 containers, used and available
2015-11-19 15:40:55,382 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000002 Container Transitioned from ALLOCATED to ACQUIRED
2015-11-19 15:40:55,885 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000002 Container Transitioned from ACQUIRED to RUNNING
2015-11-19 15:40:59,173 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000002 Container Transitioned from RUNNING to COMPLETED
2015-11-19 15:40:59,173 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp: Completed container: container_1447914055057_0001_01_000002 in state: COMPLETED event:FINISHED
2015-11-19 15:40:59,174 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=AM Released Container TARGET=SchedulerApp RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000002
2015-11-19 15:40:59,174 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode: Released container container_1447914055057_0001_01_000002 of capacity on host iRobot:11000, which currently has 1 containers, used and available, release resources=true
2015-11-19 15:40:59,174 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler: Application appattempt_1447914055057_0001_000001 released container container_1447914055057_0001_01_000002 on node: host: iRobot:11000 #containers=1 available=2048 used=2048 with event: FINISHED
2015-11-19 15:41:00,174 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000003 Container Transitioned from NEW to ALLOCATED
2015-11-19 15:41:00,174 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=AM Allocated Container TARGET=SchedulerApp RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000003
2015-11-19 15:41:00,174 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode: Assigned container container_1447914055057_0001_01_000003 of capacity on host iRobot:11000, which currently has 2 containers, used and available
2015-11-19 15:41:00,445 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000003 Container Transitioned from ALLOCATED to ACQUIRED
2015-11-19 15:41:01,178 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000003 Container Transitioned from ACQUIRED to RUNNING
2015-11-19 15:41:04,652 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000003 Container Transitioned from RUNNING to COMPLETED
2015-11-19 15:41:04,653 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp: Completed container: container_1447914055057_0001_01_000003 in state: COMPLETED event:FINISHED
2015-11-19 15:41:04,653 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=AM Released Container TARGET=SchedulerApp RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000003
2015-11-19 15:41:04,653 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode: Released container container_1447914055057_0001_01_000003 of capacity on host iRobot:11000, which currently has 1 containers, used and available, release resources=true
2015-11-19 15:41:04,653 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler: Application appattempt_1447914055057_0001_000001 released container container_1447914055057_0001_01_000003 on node: host: iRobot:11000 #containers=1 available=2048 used=2048 with event: FINISHED
2015-11-19 15:41:08,734 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from RUNNING to FINISHING
2015-11-19 15:41:08,735 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl: application_1447914055057_0001 State change from RUNNING to FINISHING
2015-11-19 15:41:09,668 INFO org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl: container_1447914055057_0001_01_000001 Container Transitioned from RUNNING to COMPLETED
2015-11-19 15:41:09,668 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp: Completed container: container_1447914055057_0001_01_000001 in state: COMPLETED event:FINISHED
2015-11-19 15:41:09,668 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=AM Released Container TARGET=SchedulerApp RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000001
2015-11-19 15:41:09,668 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode: Released container container_1447914055057_0001_01_000001 of capacity on host iRobot:11000, which currently has 0 containers, used and available, release resources=true
2015-11-19 15:41:09,668 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler: Application appattempt_1447914055057_0001_000001 released container container_1447914055057_0001_01_000001 on node: host: iRobot:11000 #containers=0 available=4096 used=0 with event: FINISHED
2015-11-19 15:41:09,670 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl: appattempt_1447914055057_0001_000001 State change from FINISHING to FINISHED
2015-11-19 15:41:09,671 INFO org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl: application_1447914055057_0001 State change from FINISHING to FINISHED
2015-11-19 15:41:09,672 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger: USER=bruce OPERATION=Application Finished - Succeeded TARGET=RMAppManager RESULT=SUCCESS APPID=application_1447914055057_0001
2015-11-19 15:41:09,672 INFO org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncher: Cleaning master appattempt_1447914055057_0001_000001
2015-11-19 15:41:09,673 INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.AppSchedulingInfo: Application application_1447914055057_0001 requests cleared
2015-11-19 15:41:09,675 INFO org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore: Removing info for app: application_1447914055057_0001
2015-11-19 15:41:09,679 INFO org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary: appId=application_1447914055057_0001,name=word count,user=bruce,queue=default,state=FINISHED,trackingUrl=192.168.100.200:54315/proxy/application_1447914055057_0001/jobhistory/job/job_1447914055057_0001,appMasterHost=iRobot,startTime=1447918846844,finishTime=1447918868735
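(The application lifecycle recorded in the ResourceManager log above can also be checked from the command line after the fact. A minimal sketch, assuming the stock Hadoop 2.x yarn/mapred CLIs; exact options vary between releases, and the ids are the ones shown in the log above.)
# applications known to the ResourceManager (finished apps may need extra filter options on newer releases)
$ bin/yarn application -list
$ bin/yarn application -status application_1447914055057_0001
# job-level view through the MapReduce client (for a finished job this relies on the job history server)
$ bin/mapred job -status job_1447914055057_0001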
[bruce@iRobot hadoop]$ tail -f logs/*nodemanager*.log
(after the job was started at around 15:40)
2015-11-19 15:40:48,232 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1447914055057_0001_01_000001 by user bruce
2015-11-19 15:40:48,261 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Creating a new application reference for app application_1447914055057_0001
2015-11-19 15:40:48,265 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Start Container Request TARGET=ContainerManageImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000001
2015-11-19 15:40:48,268 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1447914055057_0001 transitioned from NEW to INITING
2015-11-19 15:40:48,270 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1447914055057_0001_01_000001 to application application_1447914055057_0001
2015-11-19 15:40:48,279 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1447914055057_0001 transitioned from INITING to RUNNING
2015-11-19 15:40:48,288 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000001 transitioned from NEW to LOCALIZING
2015-11-19 15:40:48,298 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/appTokens transitioned from INIT to DOWNLOADING
2015-11-19 15:40:48,298 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.jar transitioned from INIT to DOWNLOADING
2015-11-19 15:40:48,298 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.splitmetainfo transitioned from INIT to DOWNLOADING
2015-11-19 15:40:48,298 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.split transitioned from INIT to DOWNLOADING
2015-11-19 15:40:48,298 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.xml transitioned from INIT to DOWNLOADING
2015-11-19 15:40:48,298 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService: Created localizer for container_1447914055057_0001_01_000001
2015-11-19 15:40:48,437 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService: Writing credentials to the nmPrivate file /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/nmPrivate/container_1447914055057_0001_01_000001.tokens. Credentials list:
2015-11-19 15:40:48,454 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Initializing user bruce
2015-11-19 15:40:48,547 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Copying from /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/nmPrivate/container_1447914055057_0001_01_000001.tokens to /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000001.tokens
2015-11-19 15:40:48,547 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: CWD set to /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001 = file:/u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001
2015-11-19 15:40:48,858 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:49,205 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/appTokens transitioned from DOWNLOADING to LOCALIZED
2015-11-19 15:40:49,369 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.jar transitioned from DOWNLOADING to LOCALIZED
2015-11-19 15:40:49,454 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.splitmetainfo transitioned from DOWNLOADING to LOCALIZED
2015-11-19 15:40:49,549 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.split transitioned from DOWNLOADING to LOCALIZED
2015-11-19 15:40:49,631 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalizedResource: Resource hdfs://192.168.100.200:9000/tmp/hadoop-yarn/staging/bruce/.staging/job_1447914055057_0001/job.xml transitioned from DOWNLOADING to LOCALIZED
2015-11-19 15:40:49,633 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000001 transitioned from LOCALIZING to LOCALIZED
2015-11-19 15:40:49,705 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000001 transitioned from LOCALIZED to RUNNING
2015-11-19 15:40:49,797 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000001/default_container_executor.sh]
2015-11-19 15:40:49,862 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:50,865 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:51,864 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1447914055057_0001_01_000001
2015-11-19 15:40:51,868 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:51,952 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9829 for container-id container_1447914055057_0001_01_000001: 111.6 MB of 2 GB physical memory used; 1.6 GB of 4.2 GB virtual memory used
2015-11-19 15:40:52,871 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:53,874 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:54,877 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:55,013 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9829 for container-id container_1447914055057_0001_01_000001: 151.4 MB of 2 GB physical memory used; 1.6 GB of 4.2 GB virtual memory used
2015-11-19 15:40:55,529 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1447914055057_0001_01_000002 by user bruce
2015-11-19 15:40:55,529 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Start Container Request TARGET=ContainerManageImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000002
2015-11-19 15:40:55,532 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1447914055057_0001_01_000002 to application application_1447914055057_0001
2015-11-19 15:40:55,535 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000002 transitioned from NEW to LOCALIZING
2015-11-19 15:40:55,535 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got event APPLICATION_INIT for appId application_1447914055057_0001
2015-11-19 15:40:55,536 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got APPLICATION_INIT for service mapreduce.shuffle
2015-11-19 15:40:55,540 INFO org.apache.hadoop.mapred.ShuffleHandler: Added token for job_1447914055057_0001
2015-11-19 15:40:55,541 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000002 transitioned from LOCALIZING to LOCALIZED
2015-11-19 15:40:55,597 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000002 transitioned from LOCALIZED to RUNNING
2015-11-19 15:40:55,686 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000002/default_container_executor.sh]
2015-11-19 15:40:55,880 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:55,882 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 2, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:56,884 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:56,885 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 2, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:57,887 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:57,888 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 2, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:58,013 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1447914055057_0001_01_000002
2015-11-19 15:40:58,063 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9972 for container-id container_1447914055057_0001_01_000002: 217.4 MB of 1 GB physical memory used; 696.8 MB of 2.1 GB virtual memory used
2015-11-19 15:40:58,113 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9829 for container-id container_1447914055057_0001_01_000001: 152.6 MB of 2 GB physical memory used; 1.6 GB of 4.2 GB virtual memory used
2015-11-19 15:40:58,142 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Stop Container Request TARGET=ContainerManageImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000002
2015-11-19 15:40:58,145 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000002 transitioned from RUNNING to KILLING
2015-11-19 15:40:58,145 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1447914055057_0001_01_000002
2015-11-19 15:40:58,146 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:58,148 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 2, }, state: C_RUNNING, diagnostics: "Container killed by the ApplicationMaster.\n", exit_status: -1000,
2015-11-19 15:40:58,288 WARN org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Exit code from task is : 143
2015-11-19 15:40:58,288 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor:
2015-11-19 15:40:58,292 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000002 transitioned from KILLING to CONTAINER_CLEANEDUP_AFTER_KILL
2015-11-19 15:40:58,295 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Deleting absolute path : /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000002
2015-11-19 15:40:58,296 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce OPERATION=Container Finished - Killed TARGET=ContainerImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000002
2015-11-19 15:40:58,300 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000002 transitioned from CONTAINER_CLEANEDUP_AFTER_KILL to DONE
2015-11-19 15:40:58,301 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1447914055057_0001_01_000002 from application application_1447914055057_0001
2015-11-19 15:40:59,167 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:40:59,168 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 2, }, state: C_COMPLETE, diagnostics: "Container killed by the ApplicationMaster.\n\n", exit_status: 143,
2015-11-19 15:40:59,168 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1447914055057_0001_01_000002
2015-11-19 15:41:00,172 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:00,480 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Start request for container_1447914055057_0001_01_000003 by user bruce
2015-11-19 15:41:00,480 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Start Container Request TARGET=ContainerManageImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000003
2015-11-19 15:41:00,482 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Adding container_1447914055057_0001_01_000003 to application application_1447914055057_0001
2015-11-19 15:41:00,483 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000003 transitioned from NEW to LOCALIZING
2015-11-19 15:41:00,483 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got event APPLICATION_INIT for appId application_1447914055057_0001
2015-11-19 15:41:00,483 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got APPLICATION_INIT for service mapreduce.shuffle
2015-11-19 15:41:00,483 INFO org.apache.hadoop.mapred.ShuffleHandler: Added token for job_1447914055057_0001
2015-11-19 15:41:00,484 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000003 transitioned from LOCALIZING to LOCALIZED
2015-11-19 15:41:00,541 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000003 transitioned from LOCALIZED to RUNNING
2015-11-19 15:41:00,633 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: launchContainer: [bash, /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000003/default_container_executor.sh]
2015-11-19 15:41:01,113 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Starting resource-monitoring for container_1447914055057_0001_01_000003
2015-11-19 15:41:01,113 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1447914055057_0001_01_000002
2015-11-19 15:41:01,170 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9829 for container-id container_1447914055057_0001_01_000001: 153.0 MB of 2 GB physical memory used; 1.6 GB of 4.2 GB virtual memory used
2015-11-19 15:41:01,175 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:01,176 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 3, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:01,246 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 10082 for container-id container_1447914055057_0001_01_000003: 50.1 MB of 1 GB physical memory used; 685.6 MB of 2.1 GB virtual memory used
2015-11-19 15:41:02,178 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:02,179 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 3, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:03,181 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:03,182 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 3, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:03,636 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce IP=192.168.100.200 OPERATION=Stop Container Request TARGET=ContainerManageImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000003
2015-11-19 15:41:03,639 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000003 transitioned from RUNNING to KILLING
2015-11-19 15:41:03,639 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:03,640 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1447914055057_0001_01_000003
2015-11-19 15:41:03,641 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 3, }, state: C_RUNNING, diagnostics: "Container killed by the ApplicationMaster.\n", exit_status: -1000,
2015-11-19 15:41:03,749 WARN org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Exit code from task is : 143
2015-11-19 15:41:03,749 INFO org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor:
2015-11-19 15:41:03,750 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000003 transitioned from KILLING to CONTAINER_CLEANEDUP_AFTER_KILL
2015-11-19 15:41:03,751 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Deleting absolute path : /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000003
2015-11-19 15:41:03,752 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce OPERATION=Container Finished - Killed TARGET=ContainerImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000003
2015-11-19 15:41:03,752 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000003 transitioned from CONTAINER_CLEANEDUP_AFTER_KILL to DONE
2015-11-19 15:41:03,752 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1447914055057_0001_01_000003 from application application_1447914055057_0001
2015-11-19 15:41:04,246 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1447914055057_0001_01_000003
2015-11-19 15:41:04,303 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9829 for container-id container_1447914055057_0001_01_000001: 153.7 MB of 2 GB physical memory used; 1.6 GB of 4.2 GB virtual memory used
2015-11-19 15:41:04,647 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:04,648 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 3, }, state: C_COMPLETE, diagnostics: "Container killed by the ApplicationMaster.\n\n", exit_status: 143,
2015-11-19 15:41:04,649 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1447914055057_0001_01_000003
2015-11-19 15:41:05,652 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:06,655 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:07,358 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Memory usage of ProcessTree 9829 for container-id container_1447914055057_0001_01_000001: 153.8 MB of 2 GB physical memory used; 1.6 GB of 4.2 GB virtual memory used
2015-11-19 15:41:07,658 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:08,661 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_RUNNING, diagnostics: "", exit_status: -1000,
2015-11-19 15:41:08,896 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Container container_1447914055057_0001_01_000001 succeeded
2015-11-19 15:41:08,897 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000001 transitioned from RUNNING to EXITED_WITH_SUCCESS
2015-11-19 15:41:08,898 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch: Cleaning up container container_1447914055057_0001_01_000001
2015-11-19 15:41:08,925 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Deleting absolute path : /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001/container_1447914055057_0001_01_000001
2015-11-19 15:41:08,925 INFO org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=bruce OPERATION=Container Finished - Succeeded TARGET=ContainerImpl RESULT=SUCCESS APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000001
2015-11-19 15:41:08,925 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container: Container container_1447914055057_0001_01_000001 transitioned from EXITED_WITH_SUCCESS to DONE
2015-11-19 15:41:08,925 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Removing container_1447914055057_0001_01_000001 from application application_1447914055057_0001
2015-11-19 15:41:09,664 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Sending out status for container: container_id {, app_attempt_id {, application_id {, id: 1, cluster_timestamp: 1447914055057, }, attemptId: 1, }, id: 1, }, state: C_COMPLETE, diagnostics: "", exit_status: 0,
2015-11-19 15:41:09,664 INFO org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl: Removed completed container container_1447914055057_0001_01_000001
2015-11-19 15:41:09,683 WARN org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl: Trying to stop unknown container container_1447914055057_0001_01_000001
2015-11-19 15:41:09,684 WARN org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger: USER=UnknownUser IP=192.168.100.200 OPERATION=Stop Container Request TARGET=ContainerManagerImpl RESULT=FAILURE DESCRIPTION=Trying to stop unknown container!APPID=application_1447914055057_0001 CONTAINERID=container_1447914055057_0001_01_000001
2015-11-19 15:41:10,359 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorImpl: Stopping resource-monitoring for container_1447914055057_0001_01_000001
2015-11-19 15:41:10,672 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1447914055057_0001 transitioned from RUNNING to APPLICATION_RESOURCES_CLEANINGUP
2015-11-19 15:41:10,673 INFO org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor: Deleting absolute path : /u01/hadoopdata/hadoop-bruce/yarn/nmlocal/usercache/bruce/appcache/application_1447914055057_0001
2015-11-19 15:41:10,673 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.AuxServices: Got event APPLICATION_STOP for appId application_1447914055057_0001
2015-11-19 15:41:10,674 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application: Application application_1447914055057_0001 transitioned from APPLICATION_RESOURCES_CLEANINGUP to FINISHED
2015-11-19 15:41:10,675 INFO org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.NonAggregatingLogHandler: Scheduling Log Deletion for application: application_1447914055057_0001, with delay of 10800 seconds
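(The last line above comes from NonAggregatingLogHandler: log aggregation is not enabled, so each container's stdout/stderr/syslog stays on the NodeManager's local disk and is only deleted after yarn.nodemanager.log.retain-seconds, 10800 s here. A minimal sketch for reading those logs; the userlogs layout and the syslog file name are assumptions based on the default yarn.nodemanager.log-dirs setting and the standard MR container log4j setup.)
# local container logs for this application, relative to the hadoop directory used above
$ ls logs/userlogs/application_1447914055057_0001/
$ less logs/userlogs/application_1447914055057_0001/container_1447914055057_0001_01_000001/syslog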
(job started around 15:40; listed after the job completed)
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/name/current/ -lrt
total 1056
-rw-r--r-- 1 bruce oinstall 206 Nov 19 13:55 VERSION
-rw-r--r-- 1 bruce oinstall 30 Nov 19 14:17 edits_0000000000000000001-0000000000000000002
-rw-r--r-- 1 bruce oinstall 116 Nov 19 14:17 fsimage_0000000000000000002
-rw-r--r-- 1 bruce oinstall 62 Nov 19 14:17 fsimage_0000000000000000002.md5
-rw-r--r-- 1 bruce oinstall 544 Nov 19 15:17 edits_0000000000000000003-0000000000000000012
-rw-r--r-- 1 bruce oinstall 3 Nov 19 15:17 seen_txid
-rw-r--r-- 1 bruce oinstall 358 Nov 19 15:17 fsimage_0000000000000000012
-rw-r--r-- 1 bruce oinstall 62 Nov 19 15:17 fsimage_0000000000000000012.md5
-rw-r--r-- 1 bruce oinstall 1048576 Nov 19 15:41 edits_inprogress_0000000000000000013
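(The finalized edits segment and the checkpointed fsimage listed above can be inspected offline with the edits/image viewers. A minimal sketch, assuming the hdfs oev/oiv tools shipped with this Hadoop line; the output format differs between releases, and the /tmp output paths are arbitrary.)
# offline edits viewer: dump the rolled segment (txids 3-12) to XML
$ bin/hdfs oev -i /u01/hadoopdata/hadoop-bruce/dfs/name/current/edits_0000000000000000003-0000000000000000012 -o /tmp/edits-3-12.xml
# offline image viewer: dump the checkpointed image to a readable listing
$ bin/hdfs oiv -i /u01/hadoopdata/hadoop-bruce/dfs/name/current/fsimage_0000000000000000012 -o /tmp/fsimage-12.txt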
(job started around 15:40; listed after the job completed)
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/namesecondary/current/ -lrt
total 28
-rw-r--r-- 1 bruce oinstall 30 Nov 19 14:17 edits_0000000000000000001-0000000000000000002
-rw-r--r-- 1 bruce oinstall 116 Nov 19 14:17 fsimage_0000000000000000002
-rw-r--r-- 1 bruce oinstall 62 Nov 19 14:17 fsimage_0000000000000000002.md5
-rw-r--r-- 1 bruce oinstall 544 Nov 19 15:17 edits_0000000000000000003-0000000000000000012
-rw-r--r-- 1 bruce oinstall 358 Nov 19 15:17 fsimage_0000000000000000012
-rw-r--r-- 1 bruce oinstall 62 Nov 19 15:17 fsimage_0000000000000000012.md5
-rw-r--r-- 1 bruce oinstall 206 Nov 19 15:17 VERSION
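(namesecondary/current mirrors the files the SecondaryNameNode pulled and merged during the 15:17 checkpoint. How often that happens is governed by dfs.namenode.checkpoint.period and dfs.namenode.checkpoint.txns; a minimal sketch for reading the effective values, assuming hdfs getconf supports -confKey in this release.)
$ bin/hdfs getconf -confKey dfs.namenode.checkpoint.period
$ bin/hdfs getconf -confKey dfs.namenode.checkpoint.txns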
(job started around 15:40; listed after the job completed)
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/
current/ in_use.lock
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/ -lrt
total 8
-rw-r--r-- 1 bruce oinstall 189 Nov 19 14:06 VERSION
drwx------ 4 bruce oinstall 4096 Nov 19 15:41 BP-981411196-192.168.100.200-1447912540337
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/
current dncp_block_verification.log.curr dncp_block_verification.log.prev tmp
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/ -lrt
total 12
drwxr-xr-x 2 bruce oinstall 4096 Nov 19 14:06 tmp
drwxr-xr-x 4 bruce oinstall 4096 Nov 19 14:06 current
-rw-r--r-- 1 bruce oinstall 863 Nov 19 15:41 dncp_block_verification.log.prev
-rw-r--r-- 1 bruce oinstall 0 Nov 19 15:41 dncp_block_verification.log.curr
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/tmp/
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/
finalized rbw VERSION
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/
blk_1466048723589228798 blk_2001110028991576353 blk_6533808687986917333 blk_885069979340900800 blk_944028033328297845
blk_1466048723589228798_1023.meta blk_2001110028991576353_1021.meta blk_6533808687986917333_1002.meta blk_885069979340900800_1025.meta blk_944028033328297845_1018.meta
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/finalized/ -lrt
total 136
-rw-r--r-- 1 bruce oinstall 11 Nov 19 15:07 blk_6533808687986917333_1002.meta
-rw-r--r-- 1 bruce oinstall 59 Nov 19 15:07 blk_6533808687986917333
-rw-r--r-- 1 bruce oinstall 11 Nov 19 15:41 blk_944028033328297845_1018.meta
-rw-r--r-- 1 bruce oinstall 59 Nov 19 15:41 blk_944028033328297845
-rw-r--r-- 1 bruce oinstall 11 Nov 19 15:41 blk_2001110028991576353_1021.meta
-rw-r--r-- 1 bruce oinstall 346 Nov 19 15:41 blk_2001110028991576353
-rw-r--r-- 1 bruce oinstall 31626 Nov 19 15:41 blk_1466048723589228798
-rw-r--r-- 1 bruce oinstall 255 Nov 19 15:41 blk_1466048723589228798_1023.meta
-rw-r--r-- 1 bruce oinstall 563 Nov 19 15:41 blk_885069979340900800_1025.meta
-rw-r--r-- 1 bruce oinstall 70769 Nov 19 15:41 blk_885069979340900800
[bruce@iRobot hadoop]$ ls /u01/hadoopdata/hadoop-bruce/dfs/data/current/BP-981411196-192.168.100.200-1447912540337/current/rbw/ -lrt
total 0
[bruce@iRobot hadoop]$
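(Each finalized blk_* file above is one block replica, paired with its checksum .meta file. To map a block id back to the HDFS file it belongs to, fsck can list every file together with its blocks and replica locations; a minimal sketch.)
# walk the namespace down to block ids and replica locations; search the output
# for a block id from the listing above (e.g. blk_6533808687986917333)
$ bin/hdfs fsck / -files -blocks -locations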