mapreduce - FileNotFoundException while running Hadoop MR Job


I am writing a mapper class that should read files from an HDFS location and create a record (using a custom class) for each file. The mapper class code:

package com.nayan.bigdata.hadoop;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.log4j.Logger;

/**
 * @file    : FileToRecordMapper.java
 * @author  : nayan
 * @version : 1.0.0
 * @date    : 27-Aug-2013 12:13:44 PM
 * @desc    : Mapper class to read files and convert them to records.
 */
public class FileToRecordMapper extends
        Mapper<LongWritable, Text, Text, RecordWritable> {

    private static Logger logger = Logger.getLogger(FileToRecordMapper.class);
    List<Path> allPaths;
    FileSystem fs;

    @Override
    protected void cleanup(Context context)
            throws IOException, InterruptedException {
        logger.info("Inside cleanup method.");
    }

    @Override
    protected void map(LongWritable key, Text value,
            Context context)
            throws IOException, InterruptedException {
        logger.info("Starting map method of FileToRecordMapper class.");
        for (Path path : allPaths) {
            FSDataInputStream in = this.fs.open(path);
            Text filePath = new Text(path.getName());
            Text directoryPath = new Text(path.getParent().getName());
            Text fileName = new Text(path.getName().substring(path.getName().lastIndexOf('/') + 1,
                    path.getName().length()));
            byte[] b = new byte[1024];
            StringBuilder contentBuilder = new StringBuilder();
            while ((in.read(b)) > 0) {
                contentBuilder.append(new String(b, "UTF-8"));
            }
            Text fileContent = new Text(contentBuilder.toString());
            in.close();
            RecordWritable record = new RecordWritable(filePath, fileName,
                    fileContent, new LongWritable(System.currentTimeMillis()));
            logger.info("Record created : " + record);
            context.write(directoryPath, record);
            logger.info("Map method of FileToRecordMapper class completed.");
        }
    }

    @Override
    public void run(Context context)
            throws IOException, InterruptedException {
        logger.info("Inside run method.");
    }

    @Override
    protected void setup(Context context)
            throws IOException, InterruptedException {
        logger.info("Inside setup method.");
        try {
            logger.info("Starting configure method of FileToRecordMapper class.");
            fs = FileSystem.get(context.getConfiguration());
            Path path = new Path(context.getConfiguration().get("mapred.input.dir"));
            allPaths = getAllPaths(path);
        } catch (IOException e) {
            logger.error("Error while fetching paths.", e);
        }
        logger.info("Paths : " + ((null != allPaths) ? allPaths : "null"));
        logger.info("Configure method of FileToRecordMapper class completed.");
        super.setup(context);
    }

    private List<Path> getAllPaths(Path path) throws IOException {
        ArrayList<Path> paths = new ArrayList<Path>();
        getAllPaths(path, paths);
        return paths;
    }

    private void getAllPaths(Path path, List<Path> paths) throws IOException {
        try {
            if (!this.fs.isFile(path)) {
                for (FileStatus s : fs.listStatus(path)) {
                    getAllPaths(s.getPath(), paths);
                }
            } else {
                paths.add(path);
            }
        } catch (IOException e) {
            logger.error("File system exception.", e);
            throw e;
        }
    }
}

The record class:

package com.nayan.bigdata.hadoop;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

/**
 * @file    : RecordWritable.java
 * @author  : nayan
 * @version : 1.0.0
 * @date    : 21-Aug-2013 1:53:12 PM
 * @desc    : Class to create a record in Accumulo
 */
public class RecordWritable implements Writable {

    private Text filePath;
    private Text fileName;
    private Text fileContent;
    private LongWritable timestamp;

    public RecordWritable() {
        this.filePath = new Text();
        this.fileName = new Text();
        this.fileContent = new Text();
        this.timestamp = new LongWritable(System.currentTimeMillis());
    }

    /**
     * @param filePath
     * @param fileName
     * @param fileContent
     * @param timestamp
     */
    public RecordWritable(Text filePath, Text fileName, Text fileContent,
            LongWritable timestamp) {
        this.filePath = filePath;
        this.fileName = fileName;
        this.fileContent = fileContent;
        this.timestamp = timestamp;
    }

    public Text getFilePath() {
        return filePath;
    }

    public void setFilePath(Text filePath) {
        this.filePath = filePath;
    }

    public Text getFileName() {
        return fileName;
    }

    public void setFileName(Text fileName) {
        this.fileName = fileName;
    }

    public Text getFileContent() {
        return fileContent;
    }

    public void setFileContent(Text fileContent) {
        this.fileContent = fileContent;
    }

    public LongWritable getTimestamp() {
        return timestamp;
    }

    public void setTimestamp(LongWritable timestamp) {
        this.timestamp = timestamp;
    }

    @Override
    public int hashCode() {
        return this.filePath.getLength() + this.fileName.getLength() + this.fileContent.getLength();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj instanceof RecordWritable) {
            RecordWritable otherRecord = (RecordWritable) obj;
            return this.filePath.equals(otherRecord.filePath) && this.fileName.equals(otherRecord.fileName);
        }
        return false;
    }

    @Override
    public String toString() {
        StringBuilder recordDesc = new StringBuilder("Record details ::\t");
        recordDesc.append("File path + ").append(this.filePath).append("\t");
        recordDesc.append("File name + ").append(this.fileName).append("\t");
        recordDesc.append("File content length + ").append(this.fileContent.getLength()).append("\t");
        recordDesc.append("File timestamp + ").append(this.timestamp).append("\t");
        return recordDesc.toString();
    }

    @Override
    public void readFields(DataInput din) throws IOException {
        filePath.readFields(din);
        fileName.readFields(din);
        fileContent.readFields(din);
        timestamp.readFields(din);
    }

    @Override
    public void write(DataOutput dout) throws IOException {
        filePath.write(dout);
        fileName.write(dout);
        fileContent.write(dout);
        timestamp.write(dout);
    }
}
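
As a quick sanity check (a minimal local sketch, not part of the job; the class name RecordWritableRoundTrip is only for illustration), a Writable like this can be exercised with a serialization round trip, since the field order in write() must match readFields():

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;

    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;

    public class RecordWritableRoundTrip {
        public static void main(String[] args) throws Exception {
            RecordWritable original = new RecordWritable(new Text("/usr/hadoop/sample"),
                    new Text("sample.txt"), new Text("hello"), new LongWritable(123L));

            // Serialize the record the same way the MapReduce framework would.
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            original.write(new DataOutputStream(bytes));

            // Deserialize into a fresh instance and compare (equals() checks path and name).
            RecordWritable copy = new RecordWritable();
            copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println("Round trip ok: " + original.equals(copy));
        }
    }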

The job runner class:

package com.nayan.bigdata.hadoop;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Logger;

/**
 * @file    : HadoopJobRunner.java
 * @author  : nayan
 * @version : 1.0.0
 * @date    : 22-Aug-2013 12:45:15 PM
 * @desc    : Class to run the Hadoop MR job.
 */
public class HadoopJobRunner extends Configured implements Tool {

    private static Logger logger = Logger.getLogger(HadoopJobRunner.class);

    /**
     * @param args
     * @throws Exception
     */
    public static void main(String[] args) throws Exception {
        int res = ToolRunner.run(new Configuration(), new HadoopJobRunner(), args);
        System.exit(res);
    }

    @Override
    public int run(String[] arg0) throws Exception {
        logger.info("Initiating Hadoop job.");
        Configuration conf = new Configuration(true);
        conf.setStrings("mapred.output.dir", arg0[1]);
        conf.setStrings("mapred.input.dir", arg0[0]);

        Job mrJob = new Job(conf, "FileRecordsJob");
        mrJob.setJarByClass(HadoopJobRunner.class);

        mrJob.setMapOutputKeyClass(Text.class);
        mrJob.setMapOutputValueClass(RecordWritable.class);
        mrJob.setMapperClass(FileToRecordMapper.class);

        mrJob.setReducerClass(FileRecordsReducer.class);
        mrJob.setOutputKeyClass(Text.class);
        mrJob.setOutputValueClass(RecordWritable.class);

        logger.info("MapRed job configuration : " + mrJob.getConfiguration().toString());
        logger.info("Input path : " + mrJob.getConfiguration().get("mapred.input.dir"));
        return mrJob.waitForCompletion(true) ? 0 : 1;
    }
}
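
For comparison, a hedged sketch of the more common way to wire the input/output paths on the new API: set them on the Job via FileInputFormat/FileOutputFormat instead of writing mapred.input.dir / mapred.output.dir into the Configuration by hand (same arg0[0] / arg0[1] arguments; imports and the enclosing class as in the runner above):

    // Variant of run(String[] arg0), sketched under the assumptions stated above.
    public int run(String[] arg0) throws Exception {
        Configuration conf = new Configuration(true);
        Job mrJob = new Job(conf, "FileRecordsJob");
        mrJob.setJarByClass(HadoopJobRunner.class);

        // These helpers populate mapred.input.dir / mapred.output.dir internally,
        // so the "Input path" log line should no longer print null.
        org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(mrJob, new org.apache.hadoop.fs.Path(arg0[0]));
        org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(mrJob, new org.apache.hadoop.fs.Path(arg0[1]));

        mrJob.setMapperClass(FileToRecordMapper.class);
        mrJob.setMapOutputKeyClass(Text.class);
        mrJob.setMapOutputValueClass(RecordWritable.class);
        mrJob.setReducerClass(FileRecordsReducer.class);
        mrJob.setOutputKeyClass(Text.class);
        mrJob.setOutputValueClass(RecordWritable.class);

        return mrJob.waitForCompletion(true) ? 0 : 1;
    }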

The project's POM file:

<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.nayan.bigdata</groupId>
    <artifactId>bigdataoperations</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>jar</packaging>
    <name>bigdataoperations</name>

    <properties>
        <hadoop.version>0.20.2</hadoop.version>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
    </properties>

    <dependencies>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-core</artifactId>
            <version>${hadoop.version}</version>
        </dependency>

        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>

        <dependency>
            <groupId>org.hamcrest</groupId>
            <artifactId>hamcrest-all</artifactId>
            <version>1.3</version>
            <scope>test</scope>
        </dependency>

        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>4.11</version>
            <scope>test</scope>
        </dependency>

    </dependencies>

    <build>
        <pluginManagement>
            <plugins>
                <plugin>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-jar-plugin</artifactId>
                    <configuration>
                        <archive>
                            <manifest>
                                <mainClass>com.nayan.bigdata.hadoop.HadoopJobRunner</mainClass>
                            </manifest>
                        </archive>
                    </configuration>
                </plugin>
            </plugins>
        </pluginManagement>
    </build>
</project>

When I run the jar, I get the following output on the console:

    [root@koversevm tmp]# hadoop jar bigdataoperations-1.0-SNAPSHOT.jar /usr/hadoop/sample /usr/hadoop/jobout
    13/08/28 18:33:57 INFO hadoop.HadoopJobRunner: Initiating Hadoop job.
    13/08/28 18:33:57 INFO hadoop.HadoopJobRunner: Setting input/output path.
    13/08/28 18:33:57 INFO hadoop.HadoopJobRunner: MapRed job configuration : Configuration: core-default.xml, core-site.xml, mapred-default.xml, mapred-site.xml
    13/08/28 18:33:57 INFO hadoop.HadoopJobRunner: Input path : null
    13/08/28 18:33:58 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same.
    13/08/28 18:33:58 INFO input.FileInputFormat: Total input paths to process : 8
    13/08/28 18:33:58 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
    13/08/28 18:33:58 WARN snappy.LoadSnappy: Snappy native library not loaded
    13/08/28 18:33:58 INFO mapred.JobClient: Running job: job_201308281800_0008
    13/08/28 18:33:59 INFO mapred.JobClient:  map 0% reduce 0%
    13/08/28 18:34:06 INFO mapred.JobClient:  map 25% reduce 0%
    13/08/28 18:34:13 INFO mapred.JobClient:  map 50% reduce 0%
    13/08/28 18:34:17 INFO mapred.JobClient:  map 75% reduce 0%
    13/08/28 18:34:23 INFO mapred.JobClient:  map 100% reduce 0%
    13/08/28 18:34:24 INFO mapred.JobClient:  map 100% reduce 33%
    13/08/28 18:34:26 INFO mapred.JobClient:  map 100% reduce 100%
    13/08/28 18:34:27 INFO mapred.JobClient: Job complete: job_201308281800_0008
    13/08/28 18:34:27 INFO mapred.JobClient: Counters: 25
    13/08/28 18:34:27 INFO mapred.JobClient:   Job Counters
    13/08/28 18:34:27 INFO mapred.JobClient:     Launched reduce tasks=1
    13/08/28 18:34:27 INFO mapred.JobClient:     SLOTS_MILLIS_MAPS=44066
    13/08/28 18:34:27 INFO mapred.JobClient:     Total time spent by all reduces waiting after reserving slots (ms)=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Total time spent by all maps waiting after reserving slots (ms)=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Launched map tasks=8
    13/08/28 18:34:27 INFO mapred.JobClient:     Data-local map tasks=8
    13/08/28 18:34:27 INFO mapred.JobClient:     SLOTS_MILLIS_REDUCES=19034
    13/08/28 18:34:27 INFO mapred.JobClient:   FileSystemCounters
    13/08/28 18:34:27 INFO mapred.JobClient:     FILE_BYTES_READ=6
    13/08/28 18:34:27 INFO mapred.JobClient:     HDFS_BYTES_READ=1011
    13/08/28 18:34:27 INFO mapred.JobClient:     FILE_BYTES_WRITTEN=549207
    13/08/28 18:34:27 INFO mapred.JobClient:   Map-Reduce Framework
    13/08/28 18:34:27 INFO mapred.JobClient:     Map input records=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Reduce shuffle bytes=48
    13/08/28 18:34:27 INFO mapred.JobClient:     Spilled Records=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Map output bytes=0
    13/08/28 18:34:27 INFO mapred.JobClient:     CPU time spent (ms)=3030
    13/08/28 18:34:27 INFO mapred.JobClient:     Total committed heap usage (bytes)=1473413120
    13/08/28 18:34:27 INFO mapred.JobClient:     Combine input records=0
    13/08/28 18:34:27 INFO mapred.JobClient:     SPLIT_RAW_BYTES=1011
    13/08/28 18:34:27 INFO mapred.JobClient:     Reduce input records=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Reduce input groups=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Combine output records=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Physical memory (bytes) snapshot=1607675904
    13/08/28 18:34:27 INFO mapred.JobClient:     Reduce output records=0
    13/08/28 18:34:27 INFO mapred.JobClient:     Virtual memory (bytes) snapshot=23948111872
    13/08/28 18:34:27 INFO mapred.JobClient:     Map output records=0

But when I checked the task logs, I found the following exception:

Task Logs: 'attempt_201308281800_0008_m_000000_0'

stdout logs

2013-08-28 18:34:01 DEBUG Child:82 - Child starting
2013-08-28 18:34:02 DEBUG Groups:136 -  Creating new Groups object
2013-08-28 18:34:02 DEBUG Groups:59 - Group mapping impl=org.apache.hadoop.security.ShellBasedUnixGroupsMapping; cacheTimeout=300000
2013-08-28 18:34:02 DEBUG UserGroupInformation:193 - hadoop login
2013-08-28 18:34:02 DEBUG UserGroupInformation:142 - hadoop login commit
2013-08-28 18:34:02 DEBUG UserGroupInformation:172 - using local user:UnixPrincipal: mapred
2013-08-28 18:34:02 DEBUG UserGroupInformation:664 - UGI loginUser:mapred (auth:SIMPLE)
2013-08-28 18:34:02 DEBUG FileSystem:1598 - Creating filesystem for file:///var/lib/hadoop-0.20/cache/mapred/mapred/local/taskTracker/root/jobcache/job_201308281800_0008/jobToken
2013-08-28 18:34:02 DEBUG TokenCache:182 - Task: Loaded jobTokenFile from: /var/lib/hadoop-0.20/cache/mapred/mapred/local/taskTracker/root/jobcache/job_201308281800_0008/jobToken; num of sec keys  = 0 Number of tokens 1
2013-08-28 18:34:02 DEBUG Child:106 - Loading token. # keys =0; file=/var/lib/hadoop-0.20/cache/mapred/mapred/local/taskTracker/root/jobcache/job_201308281800_0008/jobToken
2013-08-28 18:34:02 DEBUG UserGroupInformation:1300 - PriviledgedAction as:job_201308281800_0008 (auth:SIMPLE) from:org.apache.hadoop.mapred.Child.main(Child.java:121)
2013-08-28 18:34:02 DEBUG Client:256 - The ping interval is60000ms.
2013-08-28 18:34:02 DEBUG Client:299 - Use SIMPLE authentication for protocol TaskUmbilicalProtocol
2013-08-28 18:34:02 DEBUG Client:569 - Connecting to /127.0.0.1:50925
2013-08-28 18:34:02 DEBUG Client:762 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008: starting, having connections 1
2013-08-28 18:34:02 DEBUG Client:808 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 sending #0
2013-08-28 18:34:02 DEBUG Client:861 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 got value #0
2013-08-28 18:34:02 DEBUG RPC:230 - Call: getProtocolVersion 98
2013-08-28 18:34:02 DEBUG Client:808 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 sending #1
2013-08-28 18:34:02 DEBUG Client:861 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 got value #1
2013-08-28 18:34:02 DEBUG SortedRanges:347 - currentIndex 0   0:0
2013-08-28 18:34:02 DEBUG Counters:177 - Creating group org.apache.hadoop.mapred.Task$Counter with bundle
2013-08-28 18:34:02 DEBUG Counters:314 - Adding SPILLED_RECORDS
2013-08-28 18:34:02 DEBUG Counters:177 - Creating group org.apache.hadoop.mapred.Task$Counter with bundle
2013-08-28 18:34:02 DEBUG SortedRanges:347 - currentIndex 0   0:0
2013-08-28 18:34:02 DEBUG SortedRanges:347 - currentIndex 1   0:0
2013-08-28 18:34:02 DEBUG RPC:230 - Call: getTask 208
2013-08-28 18:34:03 DEBUG TaskRunner:653 - mapred.local.dir for child : /var/lib/hadoop-0.20/cache/mapred/mapred/local/taskTracker/root/jobcache/job_201308281800_0008/attempt_201308281800_0008_m_000000_0
2013-08-28 18:34:03 DEBUG NativeCodeLoader:40 - Trying to load the custom-built native-hadoop library...
2013-08-28 18:34:03 DEBUG NativeCodeLoader:47 - Failed to load native-hadoop with error: java.lang.UnsatisfiedLinkError: no hadoop in java.library.path
2013-08-28 18:34:03 DEBUG NativeCodeLoader:48 - java.library.path=/usr/java/jdk1.6.0_45/jre/lib/amd64/server:/usr/java/jdk1.6.0_45/jre/lib/amd64:/usr/java/jdk1.6.0_45/jre/../lib/amd64:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib:/var/lib/hadoop-0.20/cache/mapred/mapred/local/taskTracker/root/jobcache/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/work
2013-08-28 18:34:03 WARN  NativeCodeLoader:52 - Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2013-08-28 18:34:03 DEBUG TaskRunner:709 - Deleting contents of /var/lib/hadoop-0.20/cache/mapred/mapred/local/taskTracker/root/jobcache/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/work
2013-08-28 18:34:03 INFO  JvmMetrics:71 - Initializing JVM Metrics with processName=MAP, sessionId=
2013-08-28 18:34:03 DEBUG Child:251 - Creating remote user to execute task: root
2013-08-28 18:34:03 DEBUG UserGroupInformation:1300 - PriviledgedAction as:root (auth:SIMPLE) from:org.apache.hadoop.mapred.Child.main(Child.java:260)
2013-08-28 18:34:03 DEBUG FileSystem:1598 - Creating filesystem for hdfs://localhost:8020
2013-08-28 18:34:04 DEBUG Client:256 - The ping interval is60000ms.
2013-08-28 18:34:04 DEBUG Client:299 - Use SIMPLE authentication for protocol ClientProtocol
2013-08-28 18:34:04 DEBUG Client:569 - Connecting to localhost/127.0.0.1:8020
2013-08-28 18:34:04 DEBUG Client:808 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root sending #2
2013-08-28 18:34:04 DEBUG Client:762 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root: starting, having connections 2
2013-08-28 18:34:04 DEBUG Client:861 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root got value #2
2013-08-28 18:34:04 DEBUG RPC:230 - Call: getProtocolVersion 18
2013-08-28 18:34:04 DEBUG DFSClient:274 - Short circuit read is false
2013-08-28 18:34:04 DEBUG DFSClient:280 - Connect to datanode via hostname is false
2013-08-28 18:34:04 DEBUG Task:516 - Using new api for output committer
2013-08-28 18:34:04 INFO  ProcessTree:65 - setsid exited with exit code 0
2013-08-28 18:34:04 INFO  Task:539 -  Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@79ee2c2c
2013-08-28 18:34:04 DEBUG ProcfsBasedProcessTree:238 - [ 16890 ]
2013-08-28 18:34:04 DEBUG Client:808 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root sending #3
2013-08-28 18:34:04 DEBUG Client:861 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root got value #3
2013-08-28 18:34:04 DEBUG RPC:230 - Call: getBlockLocations 12
2013-08-28 18:34:04 DEBUG DFSClient:2595 - Connecting to /127.0.0.1:50010
2013-08-28 18:34:04 DEBUG FSInputChecker:1653 - DFSClient readChunk got seqno 0 offsetInBlock 0 lastPacketInBlock false packetLen 520
2013-08-28 18:34:04 DEBUG Counters:314 - Adding SPLIT_RAW_BYTES
2013-08-28 18:34:04 DEBUG DFSClient:2529 - Client couldn't reuse - didnt send code
2013-08-28 18:34:04 INFO  MapTask:613 - Processing split: hdfs://localhost:8020/usr/hadoop/sample/2012mtcreportfinal.pdf:0+1419623
2013-08-28 18:34:04 DEBUG Counters:314 - Adding MAP_INPUT_RECORDS
2013-08-28 18:34:04 DEBUG FileSystem:1598 - Creating filesystem for file:///
2013-08-28 18:34:04 INFO  MapTask:803 - io.sort.mb = 100
2013-08-28 18:34:05 INFO  MapTask:815 - data buffer = 79691776/99614720
2013-08-28 18:34:05 INFO  MapTask:816 - record buffer = 262144/327680
2013-08-28 18:34:05 DEBUG Counters:314 - Adding MAP_OUTPUT_BYTES
2013-08-28 18:34:05 DEBUG Counters:314 - Adding MAP_OUTPUT_RECORDS
2013-08-28 18:34:05 DEBUG Counters:314 - Adding COMBINE_INPUT_RECORDS
2013-08-28 18:34:05 DEBUG Counters:314 - Adding COMBINE_OUTPUT_RECORDS
2013-08-28 18:34:05 WARN  LoadSnappy:46 - Snappy native library not loaded
2013-08-28 18:34:05 DEBUG Client:808 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root sending #4
2013-08-28 18:34:05 DEBUG Client:861 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root got value #4
2013-08-28 18:34:05 DEBUG RPC:230 - Call: getBlockLocations 4
2013-08-28 18:34:05 INFO  FileToRecordMapper:65 - Inside run method.
2013-08-28 18:34:05 INFO  MapTask:1142 - Starting flush of map output
2013-08-28 18:34:05 INFO  Task:830 - Task:attempt_201308281800_0008_m_000000_0 is done. And is in the process of commiting
2013-08-28 18:34:05 DEBUG Counters:177 - Creating group FileSystemCounters with nothing
2013-08-28 18:34:05 DEBUG Counters:314 - Adding FILE_BYTES_WRITTEN
2013-08-28 18:34:05 DEBUG Counters:314 - Adding HDFS_BYTES_READ
2013-08-28 18:34:05 DEBUG Counters:314 - Adding COMMITTED_HEAP_BYTES
2013-08-28 18:34:05 DEBUG ProcfsBasedProcessTree:238 - [ 16890 ]
2013-08-28 18:34:05 DEBUG Counters:314 - Adding CPU_MILLISECONDS
2013-08-28 18:34:05 DEBUG Counters:314 - Adding PHYSICAL_MEMORY_BYTES
2013-08-28 18:34:05 DEBUG Counters:314 - Adding VIRTUAL_MEMORY_BYTES
2013-08-28 18:34:05 DEBUG Client:808 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root sending #5
2013-08-28 18:34:05 DEBUG Client:861 - IPC Client (47) connection to localhost/127.0.0.1:8020 from root got value #5
2013-08-28 18:34:05 DEBUG RPC:230 - Call: getFileInfo 2
2013-08-28 18:34:05 DEBUG Task:658 - attempt_201308281800_0008_m_000000_0 Progress/ping thread exiting since it got interrupted
2013-08-28 18:34:05 DEBUG Client:808 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 sending #6
2013-08-28 18:34:05 DEBUG Client:861 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 got value #6
2013-08-28 18:34:05 DEBUG RPC:230 - Call: statusUpdate 3
2013-08-28 18:34:05 DEBUG Client:808 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 sending #7
2013-08-28 18:34:05 DEBUG Client:861 - IPC Client (47) connection to /127.0.0.1:50925 from job_201308281800_0008 got value #7
2013-08-28 18:34:05 DEBUG RPC:230 - Call: done 1
2013-08-28 18:34:05 INFO  Task:942 - Task 'attempt_201308281800_0008_m_000000_0' done.
2013-08-28 18:34:05 INFO  TaskLogsTruncater:69 - Initializing logs' truncater with mapRetainSize=-1 and reduceRetainSize=-1
2013-08-28 18:34:05 DEBUG TaskLogsTruncater:174 - Truncation is not needed for /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/stdout
2013-08-28 18:34:05 DEBUG TaskLogsTruncater:174 - Truncation is not needed for /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/stderr
2013-08-28 18:34:05 DEBUG TaskLogsTruncater:202 - Cannot open /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/syslog for reading. Continuing with other log files
java.io.FileNotFoundException: /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/syslog (No such file or directory)
    at java.io.FileInputStream.open(Native Method)
    at java.io.FileInputStream.<init>(FileInputStream.java:120)
    at org.apache.hadoop.mapred.TaskLogsTruncater.truncateLogs(TaskLogsTruncater.java:199)
    at org.apache.hadoop.mapred.Child$4.run(Child.java:271)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:396)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1278)
    at org.apache.hadoop.mapred.Child.main(Child.java:260)
2013-08-28 18:34:05 DEBUG TaskLogsTruncater:202 - Cannot open /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/profile.out for reading. Continuing with other log files
java.io.FileNotFoundException: /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/profile.out (No such file or directory)
    at java.io.FileInputStream.open(Native Method)
    at java.io.FileInputStream.<init>(FileInputStream.java:120)
    at org.apache.hadoop.mapred.TaskLogsTruncater.truncateLogs(TaskLogsTruncater.java:199)
    at org.apache.hadoop.mapred.Child$4.run(Child.java:271)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:396)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1278)
    at org.apache.hadoop.mapred.Child.main(Child.java:260)
2013-08-28 18:34:05 DEBUG TaskLogsTruncater:202 - Cannot open /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/debugout for reading. Continuing with other log files
java.io.FileNotFoundException: /usr/lib/hadoop-0.20/logs/userlogs/job_201308281800_0008/attempt_201308281800_0008_m_000000_0/debugout (No such file or directory)
    at java.io.FileInputStream.open(Native Method)
    at java.io.FileInputStream.<init>(FileInputStream.java:120)
    at org.apache.hadoop.mapred.TaskLogsTruncater.truncateLogs(TaskLogsTruncater.java:199)
    at org.apache.hadoop.mapred.Child$4.run(Child.java:271)
    at java.security.AccessController.doPrivileged(Native Method)
    at javax.security.auth.Subject.doAs(Subject.java:396)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1278)
    at org.apache.hadoop.mapred.Child.main(Child.java:260)

I have checked the permissions, and everything works fine for the sample WordCount program. I am new to Hadoop. I googled around but could not find anything substantial. I am using hadoop-0.20.2-cdh3u6 on a single-node setup.

