Skip to content

Commit

Permalink
Handle S3Exception differently, since HDP 2.2 doesn't put the jar file for
Browse files Browse the repository at this point in the history
S3Exception in the classpath for mapreduce mapper JVMs by default.

See:
    http://hortonworks.com/community/forums/topic/s3n-error-for-hdp-2-2/

Add some more Log.POST cases for debugging.
  • Loading branch information
tomkraljevic committed Feb 5, 2015
1 parent 9c653d0 commit d4cabbd
Showing 1 changed file with 24 additions and 9 deletions.
33 changes: 24 additions & 9 deletions src/main/java/water/persist/PersistHdfs.java
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.s3.S3Exception;

import water.*;
import water.Job.ProgressMonitor;
Expand All @@ -35,29 +34,40 @@ private static String getPathForKey(Key k) {
}

static {
  // One-time setup of the shared Hadoop Configuration (stored in CONF) used by
  // all HDFS persistence operations.
  //
  // Log.POST markers (4001..4010) trace startup progress so a hang during HDFS
  // initialization can be located from the posted log alone.
  Log.POST(4001, "");
  Configuration conf = null;
  Log.POST(4002, "");
  if( H2O.OPT_ARGS.hdfs_config != null ) {
    // An explicit Hadoop config file was supplied on the command line:
    // load it as an extra Configuration resource, dying early if it is missing.
    Log.POST(4003, "");
    conf = new Configuration();
    File p = new File(H2O.OPT_ARGS.hdfs_config);
    if( !p.exists() ) Log.die("Unable to open hdfs configuration file " + p.getAbsolutePath());
    conf.addResource(new Path(p.getAbsolutePath()));
    // Fixed: this message was logged twice (once at debug level, once at info
    // level with identical arguments); keep the single info-level line.
    Log.info(Sys.HDFS_, "resource ", p.getAbsolutePath(), " added to the hadoop configuration");
    Log.POST(4004, "");
  } else {
    // No explicit config file: build a default Configuration and, if an HDFS
    // URI was supplied, point the default filesystem at it.
    Log.POST(4005, "");
    conf = new Configuration();
    Log.POST(4006, "");
    if( !Strings.isNullOrEmpty(H2O.OPT_ARGS.hdfs) ) {
      // setup default remote Filesystem - for version 0.21 and higher
      Log.POST(4007, "");
      conf.set("fs.defaultFS", H2O.OPT_ARGS.hdfs);
      // To provide compatibility with version 0.20.0 it is necessary to setup the property
      // fs.default.name which was in newer version renamed to 'fs.defaultFS'
      Log.POST(4008, "");
      conf.set("fs.default.name", H2O.OPT_ARGS.hdfs);
    }
  }
  Log.POST(4009, "");
  CONF = conf;
  Log.POST(4010, "");
}

// Constructor for the HDFS-backed persistence layer when loading remote HDFS
// files directly: there is no local ice root directory in this mode.
PersistHdfs() {
  Log.POST(4000, "");   // startup trace marker; see the 40xx series in the static init
  this._iceRoot = null; // explicitly no local spill/cache root
}

Expand Down Expand Up @@ -216,15 +226,20 @@ private static void run(Callable c, boolean read, int size) {
ignoreAndWait(e, false);
} catch( SocketTimeoutException e ) {
ignoreAndWait(e, false);
} catch( S3Exception e ) {
// Preserve S3Exception before IOException
// Since this is tricky code - we are supporting different HDFS version
// New version declares S3Exception as IOException
// But old versions (0.20.xxx) declares it as RuntimeException
// So we have to catch it before IOException !!!
ignoreAndWait(e, false);
} catch( IOException e ) {
ignoreAndWait(e, true);
// Newer versions of Hadoop derive S3Exception from IOException
if (e.getClass().getName().contains("S3Exception")) {
ignoreAndWait(e, false);
} else {
ignoreAndWait(e, true);
}
} catch( RuntimeException e ) {
// Older versions of Hadoop derive S3Exception from RuntimeException
if (e.getClass().getName().contains("S3Exception")) {
ignoreAndWait(e, false);
} else {
throw Log.errRTExcept(e);
}
} catch( Exception e ) {
throw Log.errRTExcept(e);
}
Expand Down

0 comments on commit d4cabbd

Please sign in to comment.