Bulk Load: Learning Summary and Exception Handling

Exception: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000001_0/C1/29985554bbab499986a01d869d60564d (inode 17432): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.

When massive amounts of data have to be written into HBase, the Bulk Load approach is far more efficient; for large-scale ingestion it is practically indispensable.
We know that HBase data is ultimately persisted to HDFS. HDFS is a file system, so the data has to be stored in it in some format; with Hive, for example, we can store data as ORC or Parquet. HBase likewise has its own data format, the HFile. Bulk Load writes data directly into StoreFiles (HFiles), bypassing the normal interaction with HBase; once the HFiles are generated, they only need to be associated with HBase in a single step. Bulk Load therefore skips the write-to-WAL, write-to-MemStore and flush-to-disk stages of the normal write path.

A bank produces a large volume of transfer records every day, and data older than a certain period must be archived regularly. In this case the transfer records sit in MySQL and need to be saved into HBase; because the data volume is huge, Bulk Load is used to load it.

Background
To make data backups easier, the corresponding transfer records are exported as CSV text files every day and uploaded to HDFS. What we need to do is import these HDFS files into HBase.
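The job below assumes that the target table already exists in HBase and is pre-split (the region start keys in the log output further down suggest six regions). A minimal sketch of how such a table might be created with the Java Admin API follows; the split keys, and the assumption that the ITCAST_BANK namespace already exists, are my own inferences from the driver and the log, not something shown in the original post.

//---------------- Creating the pre-split table (hypothetical sketch) ----------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTransferRecordTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Table name taken from the driver; the ITCAST_BANK namespace is assumed to exist.
            TableName tableName = TableName.valueOf("ITCAST_BANK:TRANSFER_RECORD1");
            if (!admin.tableExists(tableName)) {
                // Split keys are an assumption based on the region start keys in the log
                // (2aaaaaaa, 55555554, 7ffffffe, aaaaaaa8, d5555552), which yields six regions
                // and therefore six reduce tasks after configureIncrementalLoad.
                byte[][] splitKeys = new byte[][] {
                        Bytes.toBytes("2aaaaaaa"), Bytes.toBytes("55555554"),
                        Bytes.toBytes("7ffffffe"), Bytes.toBytes("aaaaaaa8"),
                        Bytes.toBytes("d5555552")
                };
                admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C1"))
                        .build(), splitKeys);
            }
        }
    }
}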

 

//---------------- Mapper ----------------
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

import java.io.IOException;

/**
 * Converts the HDFS data into records that can be written out as HFiles.
 * k1, v1: the default TextInputFormat key/value (file offset, line of text)
 * k2: ImmutableBytesWritable (the rowkey as bytes)
 * v2: Put (the columns to be written into HBase)
 * Both the key and the value are byte-based types.
 */
public class BuckLoderMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {

    private int i = 0, j = 0;

    @Override
    protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // 1. Split the comma-separated line from the CSV file
        String line = value.toString();
        if (line != null && !"".equalsIgnoreCase(line)) {
            i++;
            System.out.println("-- converting record " + i + " --");
            String[] fields = line.split(",");
            // 2. The first field is the rowkey; wrap it in an ImmutableBytesWritable
            byte[] rowkey = fields[0].getBytes();
            ImmutableBytesWritable immutableBytesWritable = new ImmutableBytesWritable();
            immutableBytesWritable.set(rowkey);
            // 3. Put each remaining column into column family C1
            Put put = new Put(rowkey);
            put.addColumn("C1".getBytes(), "code".getBytes(), fields[1].getBytes());
            put.addColumn("C1".getBytes(), "rec_account".getBytes(), fields[2].getBytes());
            put.addColumn("C1".getBytes(), "rec_bank_name".getBytes(), fields[3].getBytes());
            put.addColumn("C1".getBytes(), "rec_name".getBytes(), fields[4].getBytes());
            put.addColumn("C1".getBytes(), "pay_account".getBytes(), fields[5].getBytes());
            put.addColumn("C1".getBytes(), "pay_name".getBytes(), fields[6].getBytes());
            put.addColumn("C1".getBytes(), "pay_comments".getBytes(), fields[7].getBytes());
            put.addColumn("C1".getBytes(), "pay_channel".getBytes(), fields[8].getBytes());
            put.addColumn("C1".getBytes(), "pay_way".getBytes(), fields[9].getBytes());
            put.addColumn("C1".getBytes(), "status".getBytes(), fields[10].getBytes());
            put.addColumn("C1".getBytes(), "timestamp".getBytes(), fields[11].getBytes());
            put.addColumn("C1".getBytes(), "money".getBytes(), fields[12].getBytes());
            // 4. Write the rowkey/Put pair to the context
            context.write(immutableBytesWritable, put);
        }
        j++;
        System.out.println("processed " + j + " lines");
    }
}

 
//---------------- Driver ----------------

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

import java.io.IOException;

public class SuckLoaderDriver {
    public static void main(String[] args) throws IOException, ClassNotFoundException, InterruptedException {
        // 1. Build the HBase-aware configuration and the job
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181");
        Job job = Job.getInstance(conf, "BuckLoader");
        // 2. Required when the job is submitted to a YARN cluster
        job.setJarByClass(SuckLoaderDriver.class);
        // 3.1 Input format and input path
        job.setInputFormatClass(TextInputFormat.class);
        TextInputFormat.addInputPath(job, new Path("hdfs://node1:8020/bank/input/bank_record.csv"));
        // 3.2 Mapper class and its output key/value types (k2, v2)
        job.setMapperClass(BuckLoderMapper.class);
        job.setMapOutputKeyClass(ImmutableBytesWritable.class);
        job.setMapOutputValueClass(Put.class);
        // 3.3 Shuffle (partition, sort, combine, group): defaults
        // 3.4 No custom reducer of our own
        job.setNumReduceTasks(0);
        // 3.5 Output format: HFileOutputFormat2, which writes HFiles
        job.setOutputFormatClass(HFileOutputFormat2.class);
        // 3.6 Let HFileOutputFormat2 configure the incremental load against the target table
        //     (this also sets the reducer and the number of reduce tasks to match the regions)
        Connection hbaseConn = ConnectionFactory.createConnection(conf);
        TableName tableName = TableName.valueOf("ITCAST_BANK:TRANSFER_RECORD1");
        Table table = hbaseConn.getTable(tableName);
        HFileOutputFormat2.configureIncrementalLoad(job, table, hbaseConn.getRegionLocator(tableName));
        HFileOutputFormat2.setOutputPath(job, new Path("hdfs://node1:8020/bank/output/"));
        // 4. Submit the job and wait for it to finish
        boolean flag = job.waitForCompletion(true);
        table.close();
        hbaseConn.close();
        // 5. Exit with 0 on success, 1 on failure
        System.exit(flag ? 0 : 1);
    }
}
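
The driver above only generates HFiles under /bank/output; the "one-shot association" with HBase mentioned earlier still has to be performed so that the HFiles are actually moved into the table's regions. A minimal sketch of that step follows, assuming an HBase 2.x client where the loader tool lives in org.apache.hadoop.hbase.tool; the paths and table name are taken from the driver above.

//---------------- Completing the bulk load (hypothetical sketch) ----------------
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;

public class BulkLoadData {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "node1:2181");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tableName = TableName.valueOf("ITCAST_BANK:TRANSFER_RECORD1");
            Table table = conn.getTable(tableName);
            // Point the tool at the job's output directory; it moves the generated HFiles
            // into the table's regions without going through the WAL or the MemStore.
            LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
            loader.doBulkLoad(new Path("hdfs://node1:8020/bank/output/"),
                    admin, table, conn.getRegionLocator(tableName));
            table.close();
        }
    }
}

The same step can also be run from the command line with the completebulkload utility shipped with HBase.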


Problems encountered
INFO - Finishing task: attempt_local1723295629_0001_m_000000_0
INFO - map task executor complete.
INFO - Waiting for reduce tasks
INFO - Starting task: attempt_local1723295629_0001_r_000000_0
INFO - File Output Committer Algorithm version is 1
INFO - ProcfsBasedProcessTree currently is supported only on Linux.
INFO - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@49279d5c
INFO - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@6e3e49dd
INFO - MergerManager: memoryLimit=2654155520, maxSingleShuffleLimit=663538880, mergeThreshold=1751742720, ioSortFactor=10, memToMemMergeOutputsThreshold=10
INFO - attempt_local1723295629_0001_r_000000_0 Thread started: EventFetcher for fetching Map Completion Events
INFO - localfetcher#1 about to shuffle output of map attempt_local1723295629_0001_m_000000_0 decomp: 46370568 len: 46370572 to MEMORY
INFO - Read 46370568 bytes from map-output for attempt_local1723295629_0001_m_000000_0
INFO - closeInMemoryFile -> map-output of size: 46370568, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->46370568
INFO - EventFetcher is interrupted.. Returning
INFO - 1 / 1 copied.
INFO - finalMerge called with 1 in-memory map-outputs and 0 on-disk map-outputs
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46370524 bytes
INFO - Merged 1 segments, 46370568 bytes to disk to satisfy reduce memory limit
INFO - Merging 1 files, 46370572 bytes from disk
INFO - Merging 0 segments, 0 bytes from memory into reduce
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46370524 bytes
INFO - 1 / 1 copied.
INFO - mapred.skip.on is deprecated. Instead, use mapreduce.job.skiprecords
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52093, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb0034, negotiated timeout = 40000
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52097, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb0035, negotiated timeout = 40000
INFO - map 100% reduce 0%
INFO - Session: 0x176df6e58cb0034 closed
INFO - EventThread shut down for session: 0x176df6e58cb0034
INFO - Read 12 entries of class java.util.TreeSet(1.5 K) > reduce
INFO - map 100% reduce 11%
INFO - Session: 0x176df6e58cb0033 closed
INFO - EventThread shut down for session: 0x176df6e58cb0033
INFO - Session: 0x176df6e58cb0035 closed
INFO - EventThread shut down for session: 0x176df6e58cb0035
WARN - Something wrong locating rowkey 000008c4-4ab2-4d22-b083-c8d15fed15f3 in ITCAST_BANK:TRANSFER_RECORD
org.apache.hadoop.hbase.client.NoServerForRegionException: No server address listed in hbase:meta for region ITCAST_BANK:TRANSFER_RECORD,,1610100158588.afda1fb509e97a8e1d3d1e7d393c6ae6. containing row 000008c4-4ab2-4d22-b083-c8d15fed15f3
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegionInMeta(ConnectionImplementation.java:928)
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:784)
at org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:64)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:58)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:47)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:295)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
WARN - Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
INFO - Scheduled snapshot period at 10 second(s).
INFO - HBase metrics system started
INFO - Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
INFO - Starting task: attempt_local1723295629_0001_r_000001_0
INFO - File Output Committer Algorithm version is 1
INFO - ProcfsBasedProcessTree currently is supported only on Linux.
INFO - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@269e579b
INFO - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@545dd2a8
INFO - MergerManager: memoryLimit=2654155520, maxSingleShuffleLimit=663538880, mergeThreshold=1751742720, ioSortFactor=10, memToMemMergeOutputsThreshold=10
INFO - attempt_local1723295629_0001_r_000001_0 Thread started: EventFetcher for fetching Map Completion Events
INFO - localfetcher#2 about to shuffle output of map attempt_local1723295629_0001_m_000000_0 decomp: 46466632 len: 46466636 to MEMORY
INFO - Read 46466632 bytes from map-output for attempt_local1723295629_0001_m_000000_0
INFO - closeInMemoryFile -> map-output of size: 46466632, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->46466632
INFO - EventFetcher is interrupted.. Returning
INFO - 1 / 1 copied.
INFO - finalMerge called with 1 in-memory map-outputs and 0 on-disk map-outputs
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46466588 bytes
INFO - Merged 1 segments, 46466632 bytes to disk to satisfy reduce memory limit
INFO - Merging 1 files, 46466636 bytes from disk
INFO - Merging 0 segments, 0 bytes from memory into reduce
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46466588 bytes
INFO - 1 / 1 copied.
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52130, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb0036, negotiated timeout = 40000
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52134, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb0037, negotiated timeout = 40000
INFO - Session: 0x176df6e58cb0036 closed
INFO - EventThread shut down for session: 0x176df6e58cb0036
INFO - Read 12 entries of class java.util.TreeSet(1.5 K) > reduce
INFO - map 100% reduce 22%
INFO - Session: 0x176df6e58cb0037 closed
INFO - EventThread shut down for session: 0x176df6e58cb0037
WARN - Something wrong locating rowkey 2aaac043-c57e-444c-9097-e9fb20ad4638 in ITCAST_BANK:TRANSFER_RECORD
org.apache.hadoop.hbase.client.NoServerForRegionException: No server address listed in hbase:meta for region ITCAST_BANK:TRANSFER_RECORD,2aaaaaaa,1610100158588.4c97ec93f48fd693c24cb2d374cee979. containing row 2aaac043-c57e-444c-9097-e9fb20ad4638
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegionInMeta(ConnectionImplementation.java:928)
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:784)
at org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:64)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:58)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:47)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:295)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
INFO - Starting task: attempt_local1723295629_0001_r_000002_0
INFO - File Output Committer Algorithm version is 1
INFO - ProcfsBasedProcessTree currently is supported only on Linux.
INFO - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@27ae95c4
INFO - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@16df3e7d
INFO - MergerManager: memoryLimit=2654155520, maxSingleShuffleLimit=663538880, mergeThreshold=1751742720, ioSortFactor=10, memToMemMergeOutputsThreshold=10
INFO - attempt_local1723295629_0001_r_000002_0 Thread started: EventFetcher for fetching Map Completion Events
INFO - localfetcher#3 about to shuffle output of map attempt_local1723295629_0001_m_000000_0 decomp: 46266555 len: 46266559 to MEMORY
INFO - Read 46266555 bytes from map-output for attempt_local1723295629_0001_m_000000_0
INFO - closeInMemoryFile -> map-output of size: 46266555, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->46266555
INFO - EventFetcher is interrupted.. Returning
INFO - 1 / 1 copied.
INFO - finalMerge called with 1 in-memory map-outputs and 0 on-disk map-outputs
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46266511 bytes
INFO - Merged 1 segments, 46266555 bytes to disk to satisfy reduce memory limit
INFO - Merging 1 files, 46266559 bytes from disk
INFO - Merging 0 segments, 0 bytes from memory into reduce
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46266511 bytes
INFO - 1 / 1 copied.
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52177, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb0038, negotiated timeout = 40000
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52181, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb0039, negotiated timeout = 40000
INFO - Session: 0x176df6e58cb0038 closed
INFO - EventThread shut down for session: 0x176df6e58cb0038
INFO - Read 12 entries of class java.util.TreeSet(1.5 K) > reduce
INFO - map 100% reduce 33%
INFO - Session: 0x176df6e58cb0039 closed
INFO - EventThread shut down for session: 0x176df6e58cb0039
WARN - Something wrong locating rowkey 55556ab8-4ad6-4890-b293-1840d592a001 in ITCAST_BANK:TRANSFER_RECORD
org.apache.hadoop.hbase.client.NoServerForRegionException: No server address listed in hbase:meta for region ITCAST_BANK:TRANSFER_RECORD,55555554,1610100158588.5cd403feda9a0b9a4ca8f7cbe2fa8fb3. containing row 55556ab8-4ad6-4890-b293-1840d592a001
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegionInMeta(ConnectionImplementation.java:928)
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:784)
at org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:64)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:58)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:47)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:295)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
INFO - Starting task: attempt_local1723295629_0001_r_000003_0
INFO - File Output Committer Algorithm version is 1
INFO - ProcfsBasedProcessTree currently is supported only on Linux.
INFO - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@4662bb8a
INFO - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@581c4b3
INFO - MergerManager: memoryLimit=2654155520, maxSingleShuffleLimit=663538880, mergeThreshold=1751742720, ioSortFactor=10, memToMemMergeOutputsThreshold=10
INFO - attempt_local1723295629_0001_r_000003_0 Thread started: EventFetcher for fetching Map Completion Events
INFO - localfetcher#4 about to shuffle output of map attempt_local1723295629_0001_m_000000_0 decomp: 46422380 len: 46422384 to MEMORY
INFO - Read 46422380 bytes from map-output for attempt_local1723295629_0001_m_000000_0
INFO - closeInMemoryFile -> map-output of size: 46422380, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->46422380
INFO - EventFetcher is interrupted.. Returning
INFO - 1 / 1 copied.
INFO - finalMerge called with 1 in-memory map-outputs and 0 on-disk map-outputs
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46422336 bytes
INFO - Merged 1 segments, 46422380 bytes to disk to satisfy reduce memory limit
INFO - Merging 1 files, 46422384 bytes from disk
INFO - Merging 0 segments, 0 bytes from memory into reduce
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46422336 bytes
INFO - 1 / 1 copied.
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52271, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb003a, negotiated timeout = 40000
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52275, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb003b, negotiated timeout = 40000
INFO - Session: 0x176df6e58cb003a closed
INFO - EventThread shut down for session: 0x176df6e58cb003a
INFO - Read 12 entries of class java.util.TreeSet(1.5 K) > reduce
INFO - map 100% reduce 44%
INFO - Session: 0x176df6e58cb003b closed
INFO - EventThread shut down for session: 0x176df6e58cb003b
WARN - Something wrong locating rowkey 80000c06-124c-45fa-a704-71c8c97a4ac6 in ITCAST_BANK:TRANSFER_RECORD
org.apache.hadoop.hbase.client.NoServerForRegionException: No server address listed in hbase:meta for region ITCAST_BANK:TRANSFER_RECORD,7ffffffe,1610100158588.e57b9d9153fdeebaf47a7b66310010dc. containing row 80000c06-124c-45fa-a704-71c8c97a4ac6
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegionInMeta(ConnectionImplementation.java:928)
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:784)
at org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:64)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:58)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:47)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:295)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
INFO - Starting task: attempt_local1723295629_0001_r_000004_0
INFO - File Output Committer Algorithm version is 1
INFO - ProcfsBasedProcessTree currently is supported only on Linux.
INFO - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@4eb71795
INFO - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@711a5b1d
INFO - MergerManager: memoryLimit=2654155520, maxSingleShuffleLimit=663538880, mergeThreshold=1751742720, ioSortFactor=10, memToMemMergeOutputsThreshold=10
INFO - attempt_local1723295629_0001_r_000004_0 Thread started: EventFetcher for fetching Map Completion Events
INFO - localfetcher#5 about to shuffle output of map attempt_local1723295629_0001_m_000000_0 decomp: 46474878 len: 46474882 to MEMORY
INFO - Read 46474878 bytes from map-output for attempt_local1723295629_0001_m_000000_0
INFO - closeInMemoryFile -> map-output of size: 46474878, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->46474878
INFO - EventFetcher is interrupted.. Returning
INFO - 1 / 1 copied.
INFO - finalMerge called with 1 in-memory map-outputs and 0 on-disk map-outputs
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46474834 bytes
INFO - Merged 1 segments, 46474878 bytes to disk to satisfy reduce memory limit
INFO - Merging 1 files, 46474882 bytes from disk
INFO - Merging 0 segments, 0 bytes from memory into reduce
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46474834 bytes
INFO - 1 / 1 copied.
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52361, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb003c, negotiated timeout = 40000
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52365, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb003d, negotiated timeout = 40000
INFO - Session: 0x176df6e58cb003c closed
INFO - EventThread shut down for session: 0x176df6e58cb003c
INFO - Read 12 entries of class java.util.TreeSet(1.6 K) > reduce
INFO - map 100% reduce 56%
INFO - Session: 0x176df6e58cb003d closed
INFO - EventThread shut down for session: 0x176df6e58cb003d
WARN - Something wrong locating rowkey aaaab0d4-be20-46d4-852b-c0309a13df67 in ITCAST_BANK:TRANSFER_RECORD
org.apache.hadoop.hbase.client.NoServerForRegionException: No server address listed in hbase:meta for region ITCAST_BANK:TRANSFER_RECORD,aaaaaaa8,1610100158588.e6f65b2328f9cb14741adbe1d41f2cb5. containing row aaaab0d4-be20-46d4-852b-c0309a13df67
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegionInMeta(ConnectionImplementation.java:928)
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:784)
at org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:64)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:58)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:47)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:295)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
INFO - Starting task: attempt_local1723295629_0001_r_000005_0
INFO - File Output Committer Algorithm version is 1
INFO - ProcfsBasedProcessTree currently is supported only on Linux.
INFO - Using ResourceCalculatorProcessTree : org.apache.hadoop.yarn.util.WindowsBasedProcessTree@423268f4
INFO - Using ShuffleConsumerPlugin: org.apache.hadoop.mapreduce.task.reduce.Shuffle@1dac5fb4
INFO - MergerManager: memoryLimit=2654155520, maxSingleShuffleLimit=663538880, mergeThreshold=1751742720, ioSortFactor=10, memToMemMergeOutputsThreshold=10
INFO - attempt_local1723295629_0001_r_000005_0 Thread started: EventFetcher for fetching Map Completion Events
INFO - localfetcher#6 about to shuffle output of map attempt_local1723295629_0001_m_000000_0 decomp: 46323199 len: 46323203 to MEMORY
INFO - Read 46323199 bytes from map-output for attempt_local1723295629_0001_m_000000_0
INFO - closeInMemoryFile -> map-output of size: 46323199, inMemoryMapOutputs.size() -> 1, commitMemory -> 0, usedMemory ->46323199
INFO - EventFetcher is interrupted.. Returning
INFO - 1 / 1 copied.
INFO - finalMerge called with 1 in-memory map-outputs and 0 on-disk map-outputs
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46323155 bytes
INFO - Merged 1 segments, 46323199 bytes to disk to satisfy reduce memory limit
INFO - Merging 1 files, 46323203 bytes from disk
INFO - Merging 0 segments, 0 bytes from memory into reduce
INFO - Merging 1 sorted segments
INFO - Down to the last merge-pass, with 1 segments left of total size: 46323155 bytes
INFO - 1 / 1 copied.
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52400, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb003e, negotiated timeout = 40000
INFO - Initiating client connection, connectString=node1:2181 sessionTimeout=90000 watcher=org.apache.hadoop.hbase.zookeeper.ReadOnlyZKClient$$Lambda$7/1801448323@253e0001
INFO - jute.maxbuffer value is 4194304 Bytes
INFO - zookeeper.request.timeout value is 0. feature enabled=
INFO - Opening socket connection to server node1/192.168.88.161:2181. Will not attempt to authenticate using SASL (unknown error)
INFO - Socket connection established, initiating session, client: /192.168.88.1:52404, server: node1/192.168.88.161:2181
INFO - Session establishment complete on server node1/192.168.88.161:2181, sessionid = 0x176df6e58cb003f, negotiated timeout = 40000
INFO - Session: 0x176df6e58cb003e closed
INFO - EventThread shut down for session: 0x176df6e58cb003e
INFO - Read 12 entries of class java.util.TreeSet(1.5 K) > reduce
INFO - map 100% reduce 67%
INFO - Session: 0x176df6e58cb003f closed
INFO - EventThread shut down for session: 0x176df6e58cb003f
WARN - Something wrong locating rowkey d5555757-08eb-4b33-bb01-4c220b65e9e3 in ITCAST_BANK:TRANSFER_RECORD
org.apache.hadoop.hbase.client.NoServerForRegionException: No server address listed in hbase:meta for region ITCAST_BANK:TRANSFER_RECORD,d5555552,1610100158588.1a9dc3eb0ea0108cca8f44be105172fe. containing row d5555757-08eb-4b33-bb01-4c220b65e9e3
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegionInMeta(ConnectionImplementation.java:928)
at org.apache.hadoop.hbase.client.ConnectionImplementation.locateRegion(ConnectionImplementation.java:784)
at org.apache.hadoop.hbase.client.HRegionLocator.getRegionLocation(HRegionLocator.java:64)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:58)
at org.apache.hadoop.hbase.client.RegionLocator.getRegionLocation(RegionLocator.java:47)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:295)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
INFO - reduce task executor complete.
WARN - job_local1723295629_0001
java.lang.Exception: java.lang.RuntimeException: native snappy library not available: this version of libhadoop was built without snappy support.
at org.apache.hadoop.mapred.LocalJobRunner$Job.runTasks(LocalJobRunner.java:462)
at org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:529)
Caused by: java.lang.RuntimeException: native snappy library not available: this version of libhadoop was built without snappy support.
at org.apache.hadoop.io.compress.SnappyCodec.checkNativeCodeLoaded(SnappyCodec.java:65)
at org.apache.hadoop.io.compress.SnappyCodec.getCompressorType(SnappyCodec.java:134)
at org.apache.hadoop.io.compress.CodecPool.getCompressor(CodecPool.java:150)
at org.apache.hadoop.io.compress.CodecPool.getCompressor(CodecPool.java:168)
at org.apache.hadoop.hbase.io.compress.Compression$Algorithm.getCompressor(Compression.java:356)
at org.apache.hadoop.hbase.io.encoding.HFileBlockDefaultEncodingContext.<init>(HFileBlockDefaultEncodingContext.java:86)
at org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder.newDataBlockEncodingContext(NoOpDataBlockEncoder.java:86)
at org.apache.hadoop.hbase.io.hfile.HFileBlock$Writer.<init>(HFileBlock.java:866)
at org.apache.hadoop.hbase.io.hfile.HFileWriterImpl.finishInit(HFileWriterImpl.java:303)
at org.apache.hadoop.hbase.io.hfile.HFileWriterImpl.<init>(HFileWriterImpl.java:182)
at org.apache.hadoop.hbase.io.hfile.HFile$WriterFactory.create(HFile.java:319)
at org.apache.hadoop.hbase.regionserver.StoreFileWriter.<init>(StoreFileWriter.java:116)
at org.apache.hadoop.hbase.regionserver.StoreFileWriter.<init>(StoreFileWriter.java:73)
at org.apache.hadoop.hbase.regionserver.StoreFileWriter$Builder.build(StoreFileWriter.java:539)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.getNewWriter(HFileOutputFormat2.java:405)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:304)
at org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2$1.write(HFileOutputFormat2.java:233)
at org.apache.hadoop.mapred.ReduceTask$NewTrackingRecordWriter.write(ReduceTask.java:558)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.reduce.WrappedReducer$Context.write(WrappedReducer.java:105)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:135)
at org.apache.hadoop.hbase.mapreduce.PutSortReducer.reduce(PutSortReducer.java:53)
at org.apache.hadoop.mapreduce.Reducer.run(Reducer.java:171)
at org.apache.hadoop.mapred.ReduceTask.runNewReducer(ReduceTask.java:627)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:389)
at org.apache.hadoop.mapred.LocalJobRunner$Job$ReduceTaskRunnable.run(LocalJobRunner.java:319)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
INFO - Job job_local1723295629_0001 failed with state FAILED due to: NA
INFO - Counters: 35
File System Counters
FILE: Number of bytes read=3166988311
FILE: Number of bytes written=5119411991
FILE: Number of read operations=0
FILE: Number of large read operations=0
FILE: Number of write operations=0
HDFS: Number of bytes read=716269400
HDFS: Number of bytes written=0
HDFS: Number of read operations=65
HDFS: Number of large read operations=0
HDFS: Number of write operations=43
Map-Reduce Framework
Map input records=500000
Map output records=500000
Map output bytes=276324200
Map output materialized bytes=278324236
Input split bytes=109
Combine input records=0
Combine output records=0
Reduce input groups=6
Reduce shuffle bytes=278324236
Reduce input records=6
Reduce output records=0
Spilled Records=1000000
Shuffled Maps =6
Failed Shuffles=0
Merged Map outputs=6
GC time elapsed (ms)=230
Total committed heap usage (bytes)=6945767424
Shuffle Errors
BAD_ID=0
CONNECTION=0
IO_ERROR=0
WRONG_LENGTH=0
WRONG_MAP=0
WRONG_REDUCE=0
File Input Format Counters
Bytes Read=102324200
File Output Format Counters
Bytes Written=0
ERROR - Failed to close inode 17441
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000004_0/C1/ba69fb460bdd4c09b3daa83a6f4a7a9a (inode 17441): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3408)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3485)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3462)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:787)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:537)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2217)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2213)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1754)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2211)

at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1413)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.complete(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy11.complete(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2506)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2482)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2447)
at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:946)
at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:978)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2758)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2775)
at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
ERROR - Failed to close inode 17444
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000005_0/C1/3e84c4e9e1084d088241d536d3f049ba (inode 17444): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3408)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3485)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3462)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:787)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:537)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2217)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2213)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1754)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2211)

at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1413)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.complete(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy11.complete(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2506)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2482)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2447)
at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:946)
at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:978)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2758)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2775)
at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
ERROR - Failed to close inode 17429
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000000_0/C1/59bcd5a4aa564f8dac89c9d8422cc542 (inode 17429): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3408)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3485)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3462)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:787)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:537)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2217)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2213)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1754)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2211)

at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1413)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.complete(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy11.complete(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2506)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2482)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2447)
at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:946)
at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:978)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2758)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2775)
at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
ERROR - Failed to close inode 17432
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000001_0/C1/29985554bbab499986a01d869d60564d (inode 17432): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3408)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3485)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3462)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:787)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:537)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2217)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2213)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1754)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2211)

at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1413)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.complete(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy11.complete(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2506)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2482)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2447)
at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:946)
at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:978)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2758)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2775)
at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
ERROR - Failed to close inode 17435
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000002_0/C1/79d85a98bc6f49a3a0063fe7176b4903 (inode 17435): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3408)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3485)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3462)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:787)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:537)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2217)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2213)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1754)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2211)

at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1413)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.complete(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy11.complete(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2506)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2482)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2447)
at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:946)
at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:978)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2758)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2775)
at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
ERROR - Failed to close inode 17438
org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): No lease on /bank/output/_temporary/0/_temporary/attempt_local1723295629_0001_r_000003_0/C1/0009081770e5426bb1f96b5092d59c58 (inode 17438): File does not exist. Holder DFSClient_NONMAPREDUCE_503753593_1 does not have any open files.
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkLease(FSNamesystem.java:3408)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFileInternal(FSNamesystem.java:3485)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.completeFile(FSNamesystem.java:3462)
at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.complete(NameNodeRpcServer.java:787)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.complete(ClientNamenodeProtocolServerSideTranslatorPB.java:537)
at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:616)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:982)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2217)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:2213)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1754)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2211)

at org.apache.hadoop.ipc.Client.call(Client.java:1476)
at org.apache.hadoop.ipc.Client.call(Client.java:1413)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:229)
at com.sun.proxy.$Proxy10.complete(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.complete(ClientNamenodeProtocolTranslatorPB.java:462)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:191)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy11.complete(Unknown Source)
at org.apache.hadoop.hdfs.DFSOutputStream.completeFile(DFSOutputStream.java:2506)
at org.apache.hadoop.hdfs.DFSOutputStream.closeImpl(DFSOutputStream.java:2482)
at org.apache.hadoop.hdfs.DFSOutputStream.close(DFSOutputStream.java:2447)
at org.apache.hadoop.hdfs.DFSClient.closeAllFilesBeingWritten(DFSClient.java:946)
at org.apache.hadoop.hdfs.DFSClient.closeOutputStreams(DFSClient.java:978)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1076)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2758)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2775)
at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
Cause of the problem:
The disk was too small. The job was running on a virtual machine with only a little over 10 GB of disk, which is what triggered the errors; after freeing up space, it ran fine.

posted @ 2021-01-10 00:58  不吃饭的猪