diff --git a/NOTICE b/NOTICE
index da93bf945..b58fadbec 100644
--- a/NOTICE
+++ b/NOTICE
@@ -75,8 +75,8 @@ Bootstrap (http://getbootstrap.com/)
 Copyright 2011-2016 Twitter, Inc.
 License: MIT License (https://github.com/twbs/bootstrap/blob/master/LICENSE)
 
-Bootstrap Datepicker (http://www.eyecon.ro/bootstrap-datepicker)
-Copyright 2012 Stefan Petre
+Bootstrap Datepicker (https://github.com/eternicode/bootstrap-datepicker)
+Copyright 2012 Stefan Petre, Improvements by Andrew Rowls
 License: Apache 2.0
 
 D3.js (http://d3js.org)
diff --git a/app-conf/elephant.conf b/app-conf/elephant.conf
index 23e037ded..45ec6f23a 100644
--- a/app-conf/elephant.conf
+++ b/app-conf/elephant.conf
@@ -1,8 +1,10 @@
 port=8080
+
 db_url=localhost
 db_name=drelephant
 db_user=root
 db_password=
+
 #jvm_props="-Devolutionplugin=enabled -DapplyEvolutions.default=true"
 keytab_location="/export/apps/hadoop/keytabs/dr_elephant-service.keytab"
 keytab_user="elephant/eat1-magicaz01.grid.linkedin.com"
diff --git a/app/com/linkedin/drelephant/ElephantContext.java b/app/com/linkedin/drelephant/ElephantContext.java
index 92514643a..88de85721 100644
--- a/app/com/linkedin/drelephant/ElephantContext.java
+++ b/app/com/linkedin/drelephant/ElephantContext.java
@@ -28,7 +28,7 @@
 import com.linkedin.drelephant.configurations.fetcher.FetcherConfigurationData;
 import com.linkedin.drelephant.configurations.heuristic.HeuristicConfiguration;
 import com.linkedin.drelephant.configurations.heuristic.HeuristicConfigurationData;
-import com.linkedin.drelephant.configurations.jobtype.JobTypeConf;
+import com.linkedin.drelephant.configurations.jobtype.JobTypeConfiguration;
 import com.linkedin.drelephant.util.Utils;
 import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
@@ -175,8 +175,8 @@ private void loadHeuristics() {
 
     // Bind No_DATA heuristic to its helper pages, no need to add any real configurations
     _heuristicsConfData.add(
-        new HeuristicConfigurationData(HeuristicResult.NO_DATA.getAnalysis(), null, "views.html.help.helpNoData", null,
-            null));
+        new HeuristicConfigurationData(HeuristicResult.NO_DATA.getHeuristicName(),
+            HeuristicResult.NO_DATA.getHeuristicClassName(), "views.html.help.helpNoData", null, null));
   }
 
   /**
@@ -216,7 +216,8 @@ private void configureSupportedApplicationTypes() {
    * Load all the job types configured in JobTypeConf.xml
    */
   private void loadJobTypes() {
-    JobTypeConf conf = new JobTypeConf(JOB_TYPES_CONF);
+    Document document = Utils.loadXMLDoc(JOB_TYPES_CONF);
+    JobTypeConfiguration conf = new JobTypeConfiguration(document.getDocumentElement());
     _appTypeToJobTypes = conf.getAppTypeToJobTypeList();
   }
 
@@ -243,7 +244,7 @@ public Map<String, List<String>> getAllHeuristicNames() {
 
         List<String> nameList = new ArrayList<String>();
         for (Heuristic heuristic : list) {
-          nameList.add(heuristic.getHeuristicName());
+          nameList.add(heuristic.getHeuristicConfData().getHeuristicName());
         }
 
         Collections.sort(nameList);
diff --git a/app/com/linkedin/drelephant/ElephantRunner.java b/app/com/linkedin/drelephant/ElephantRunner.java
index 62d11fd3d..9db1acc80 100644
--- a/app/com/linkedin/drelephant/ElephantRunner.java
+++ b/app/com/linkedin/drelephant/ElephantRunner.java
@@ -32,7 +32,7 @@
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-import model.JobResult;
+import models.AppResult;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -149,7 +149,7 @@ public void run() {
           analyticJob = _jobQueue.take();
           logger.info("Executor thread " + _threadId + " analyzing " + analyticJob.getAppType().getName() + " "
               + analyticJob.getAppId());
-          JobResult result = analyticJob.getAnalysis();
+          AppResult result = analyticJob.getAnalysis();
           result.save();
 
         } catch (InterruptedException ex) {
@@ -162,9 +162,10 @@ public void run() {
             logger.error("Add analytic job id [" + analyticJob.getAppId() + "] into the retry list.");
             _analyticJobGenerator.addIntoRetries(analyticJob);
           } else {
-            logger.error(
-                "Drop the analytic job. Reason: reached the max retries for application id = [" + analyticJob.getAppId()
-                    + "].");
+            if (analyticJob != null) {
+              logger.error("Drop the analytic job. Reason: reached the max retries for application id = ["
+                      + analyticJob.getAppId() + "].");
+            }
           }
         }
       }
diff --git a/app/com/linkedin/drelephant/analysis/AnalyticJob.java b/app/com/linkedin/drelephant/analysis/AnalyticJob.java
index f0151c419..f90d7ed11 100644
--- a/app/com/linkedin/drelephant/analysis/AnalyticJob.java
+++ b/app/com/linkedin/drelephant/analysis/AnalyticJob.java
@@ -20,9 +20,11 @@
 import com.linkedin.drelephant.util.InfoExtractor;
 import com.linkedin.drelephant.util.Utils;
 import java.util.ArrayList;
+import java.util.Date;
 import java.util.List;
-import model.JobHeuristicResult;
-import model.JobResult;
+import models.AppHeuristicResult;
+import models.AppHeuristicResultDetails;
+import models.AppResult;
 import org.apache.log4j.Logger;
 
 
@@ -41,6 +43,7 @@ public class AnalyticJob {
   private String _appId;
   private String _jobId;
   private String _name;
+  private String _queueName;
   private String _user;
   private String _trackingUrl;
   private long _startTime;
@@ -79,25 +82,24 @@ public AnalyticJob setAppId(String appId) {
   }
 
   /**
-   * Set the id of the job
-   * jobId is the appId with the prefix 'application_' replaced by 'job_'
+   * Set the name of the analytic job
    *
-   * @param jobId The job id
+   * @param name The name of the analytic job
    * @return The analytic job
    */
-  public AnalyticJob setJobId(String jobId) {
-    _jobId = jobId;
+  public AnalyticJob setName(String name) {
+    _name = name;
     return this;
   }
 
   /**
-   * Set the name of the analytic job
+   * Set the name of the queue to which the analytic job was submitted
    *
-   * @param name
+   * @param name the name of the queue
    * @return The analytic job
    */
-  public AnalyticJob setName(String name) {
-    _name = name;
+  public AnalyticJob setQueueName(String name) {
+    _queueName = name;
     return this;
   }
 
@@ -120,6 +122,10 @@ public AnalyticJob setUser(String user) {
    * @return The analytic job
    */
   public AnalyticJob setStartTime(long startTime) {
+    // TIMESTAMP range starts from FROM_UNIXTIME(1) = 1970-01-01 00:00:01
+    if (startTime <= 0) {
+      startTime = 1;
+    }
     _startTime = startTime;
     return this;
   }
@@ -131,6 +137,10 @@ public AnalyticJob setStartTime(long startTime) {
    * @return The analytic job
    */
   public AnalyticJob setFinishTime(long finishTime) {
+    // TIMESTAMP range starts from FROM_UNIXTIME(1) = 1970-01-01 00:00:01
+    if (finishTime <= 0) {
+      finishTime = 1;
+    }
     _finishTime = finishTime;
     return this;
   }
@@ -198,6 +208,15 @@ public String getTrackingUrl() {
     return _trackingUrl;
   }
 
+  /**
+   * Returns the name of the queue to which the application was submitted
+   *
+   * @return The queue name
+   */
+  public String getQueueName() {
+    return _queueName;
+  }
+
   /**
    * Sets the tracking url for the job
    *
@@ -210,15 +229,15 @@ public AnalyticJob setTrackingUrl(String trackingUrl) {
   }
 
   /**
-   * Returns the analysed JobResult that could be directly serialized into DB.
+   * Returns the analysed AppResult that can be directly serialized into the DB.
    *
    * This method fetches the data using the appropriate application fetcher, runs all the heuristics on them and
-   * loads it into the JobResult model.
+   * loads it into the AppResult model.
    *
    * @throws Exception if the analysis process encountered a problem.
-   * @return the analysed JobResult
+   * @return the analysed AppResult
    */
-  public JobResult getAnalysis() throws Exception {
+  public AppResult getAnalysis() throws Exception {
     ElephantFetcher fetcher = ElephantContext.instance().getFetcherForApplicationType(getAppType());
     HadoopApplicationData data = fetcher.fetchData(this);
 
@@ -227,6 +246,7 @@ public JobResult getAnalysis() throws Exception {
     if (data == null || data.isEmpty()) {
       // Example: a MR job has 0 mappers and 0 reducers
       logger.info("No Data Received for analytic job: " + getAppId());
+      HeuristicResult.NO_DATA.addResultDetail("No Data Received", "");
       analysisResults.add(HeuristicResult.NO_DATA);
     } else {
       List<Heuristic> heuristics = ElephantContext.instance().getHeuristicsForApplicationType(getAppType());
@@ -242,36 +262,45 @@ public JobResult getAnalysis() throws Exception {
     String jobTypeName = jobType == null ? UNKNOWN_JOB_TYPE : jobType.getName();
 
     // Load job information
-    JobResult result = new JobResult();
-    result.jobId = Utils.getJobIdFromApplicationId(getAppId());
-    result.url = getTrackingUrl();
+    AppResult result = new AppResult();
+    result.id = getAppId();
+    result.trackingUrl = getTrackingUrl();
+    result.queueName = getQueueName();
     result.username = getUser();
-    result.startTime = getStartTime();
-    result.analysisTime = getFinishTime();
-    result.jobName = getName();
+    result.startTime = new Date(getStartTime());
+    result.finishTime = new Date(getFinishTime());
+    result.name = getName();
     result.jobType = jobTypeName;
 
     // Truncate long names
-    if (result.jobName.length() > 100) {
-      result.jobName = result.jobName.substring(0, 97) + "...";
+    if (result.name.length() > 255) {
+      result.name = result.name.substring(0, 252) + "...";
     }
 
-    // Load Job Heuristic information
-    result.heuristicResults = new ArrayList<JobHeuristicResult>();
+    // Load App Heuristic information
+    int jobScore = 0;
+    result.yarnAppHeuristicResults = new ArrayList<AppHeuristicResult>();
     Severity worstSeverity = Severity.NONE;
     for (HeuristicResult heuristicResult : analysisResults) {
-      JobHeuristicResult detail = new JobHeuristicResult();
-      detail.analysisName = heuristicResult.getAnalysis();
-      detail.data = heuristicResult.getDetailsCSV();
-      detail.dataColumns = heuristicResult.getDetailsColumns();
+      AppHeuristicResult detail = new AppHeuristicResult();
+      detail.heuristicClass = heuristicResult.getHeuristicClassName();
+      detail.heuristicName = heuristicResult.getHeuristicName();
       detail.severity = heuristicResult.getSeverity();
-      if (detail.dataColumns < 1) {
-        detail.dataColumns = 1;
+      detail.score = heuristicResult.getScore();
+      for (HeuristicResultDetails heuristicResultDetails : heuristicResult.getHeuristicResultDetails()) {
+        AppHeuristicResultDetails heuristicDetail = new AppHeuristicResultDetails();
+        heuristicDetail.yarnAppHeuristicResult = detail;
+        heuristicDetail.name = heuristicResultDetails.getName();
+        heuristicDetail.value = heuristicResultDetails.getValue();
+        heuristicDetail.details = heuristicResultDetails.getDetails();
+        detail.yarnAppHeuristicResultDetails.add(heuristicDetail);
       }
-      result.heuristicResults.add(detail);
+      result.yarnAppHeuristicResults.add(detail);
       worstSeverity = Severity.max(worstSeverity, detail.severity);
+      jobScore += detail.score;
     }
     result.severity = worstSeverity;
+    result.score = jobScore;
 
     // Retrieve Azkaban execution, flow and jobs URLs from jobData and store them into result.
     InfoExtractor.retrieveURLs(result, data);
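
The reworked `getAnalysis()` above flattens each heuristic's structured output into an `AppHeuristicResult` row with nested `AppHeuristicResultDetails`, takes the worst heuristic severity as the app severity, and sums the per-heuristic scores into the app score. A hedged usage sketch of the new flow (the app id and type literal are illustrative, the `ApplicationType` constructor signature is an assumption, and the cascading save of the detail rows is assumed from the Ebean model; `getAnalysis()` is declared to throw `Exception`):

```java
// Sketch only: values are illustrative; assumes Ebean is initialized and the
// AppResult model cascades saves to its yarnAppHeuristicResults children.
AnalyticJob job = new AnalyticJob()
    .setAppId("application_1461566847127_0001")    // hypothetical app id
    .setAppType(new ApplicationType("MAPREDUCE"))  // assumed ctor signature
    .setUser("drelephant")
    .setName("word count")
    .setQueueName("default")
    .setStartTime(0)                               // clamped to 1: TIMESTAMP >= FROM_UNIXTIME(1)
    .setFinishTime(System.currentTimeMillis());

AppResult result = job.getAnalysis();  // fetch data, run heuristics, aggregate severity/score
result.save();
```
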
diff --git a/app/com/linkedin/drelephant/analysis/AnalyticJobGeneratorHadoop2.java b/app/com/linkedin/drelephant/analysis/AnalyticJobGeneratorHadoop2.java
index 2d9adf8a8..16a28cbb8 100644
--- a/app/com/linkedin/drelephant/analysis/AnalyticJobGeneratorHadoop2.java
+++ b/app/com/linkedin/drelephant/analysis/AnalyticJobGeneratorHadoop2.java
@@ -27,7 +27,7 @@
 import java.util.Queue;
 import java.util.Random;
 import java.util.concurrent.ConcurrentLinkedQueue;
-import model.JobResult;
+import models.AppResult;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
@@ -161,14 +161,14 @@ private List<AnalyticJob> readApps(URL url) throws IOException, AuthenticationEx
     JsonNode apps = rootNode.path("apps").path("app");
 
     for (JsonNode app : apps) {
-      String id = app.get("id").getValueAsText();
-      String jobId = Utils.getJobIdFromApplicationId(id);
+      String appId = app.get("id").getValueAsText();
 
       // When called first time after launch, hit the DB and avoid duplicated analytic jobs that have been analyzed
       // before.
-      if (_lastTime > 0 || _lastTime == 0 && JobResult.find.byId(jobId) == null && JobResult.find.byId(id) == null) {
+      if (_lastTime > 0 || (_lastTime == 0 && AppResult.find.byId(appId) == null)) {
         String user = app.get("user").getValueAsText();
         String name = app.get("name").getValueAsText();
+        String queueName = app.get("queue").getValueAsText();
         String trackingUrl = app.get("trackingUrl") != null? app.get("trackingUrl").getValueAsText() : null;
         long startTime = app.get("startedTime").getLongValue();
         long finishTime = app.get("finishedTime").getLongValue();
@@ -179,7 +179,7 @@ private List<AnalyticJob> readApps(URL url) throws IOException, AuthenticationEx
         // If the application type is supported
         if (type != null) {
           AnalyticJob analyticJob = new AnalyticJob();
-          analyticJob.setAppId(id).setAppType(type).setJobId(jobId).setUser(user).setName(name)
+          analyticJob.setAppId(appId).setAppType(type).setUser(user).setName(name).setQueueName(queueName)
               .setTrackingUrl(trackingUrl).setStartTime(startTime).setFinishTime(finishTime);
 
           appList.add(analyticJob);
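
For reference, `readApps` consumes the ResourceManager's cluster-apps REST listing (`http://<rm-address>/ws/v1/cluster/apps`), and the fields it reads are exactly the ones shown above. A trimmed sketch of the expected JSON, with illustrative values (real responses carry many more fields):

```java
// Trimmed sketch of the JSON shape readApps() parses; only the fields used
// above are shown, with illustrative values.
//
// { "apps": { "app": [ {
//     "id":           "application_1461566847127_0001",
//     "user":         "drelephant",
//     "name":         "word count",
//     "queue":        "default",
//     "trackingUrl":  "http://rm:8088/proxy/application_1461566847127_0001/",
//     "startedTime":  1461566900000,
//     "finishedTime": 1461567000000
// } ] } }
```
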
diff --git a/app/com/linkedin/drelephant/analysis/Heuristic.java b/app/com/linkedin/drelephant/analysis/Heuristic.java
index f7d906aad..31e7a0a67 100644
--- a/app/com/linkedin/drelephant/analysis/Heuristic.java
+++ b/app/com/linkedin/drelephant/analysis/Heuristic.java
@@ -16,6 +16,9 @@
 
 package com.linkedin.drelephant.analysis;
 
+import com.linkedin.drelephant.configurations.heuristic.HeuristicConfigurationData;
+
+
 /**
  * This interface defines the Heuristic rule interface.
  *
@@ -31,9 +34,9 @@ public interface Heuristic<T extends HadoopApplicationData> {
   public HeuristicResult apply(T data);
 
   /**
-   * Get the heuristic name
+   * Get the heuristic configuration
    *
-   * @return the name
+   * @return the heuristic configuration data
    */
-  public String getHeuristicName();
+  public HeuristicConfigurationData getHeuristicConfData();
 }
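
With `getHeuristicName()` gone from the interface, a heuristic's display name and class name now travel with the `HeuristicConfigurationData` injected from HeuristicConf.xml. A minimal sketch of a heuristic written against the new contract (the class itself is hypothetical, and the `MapReduceApplicationData` import path is an assumption; the rest of the API appears in this patch):

```java
import com.linkedin.drelephant.analysis.Heuristic;
import com.linkedin.drelephant.analysis.HeuristicResult;
import com.linkedin.drelephant.analysis.Severity;
import com.linkedin.drelephant.configurations.heuristic.HeuristicConfigurationData;
import com.linkedin.drelephant.mapreduce.MapReduceApplicationData;

// Hypothetical heuristic under the new interface: the display name and class
// name come from the injected configuration, not from a HEURISTIC_NAME constant.
public class NoOpHeuristic implements Heuristic<MapReduceApplicationData> {

  private final HeuristicConfigurationData _heuristicConfData;

  public NoOpHeuristic(HeuristicConfigurationData heuristicConfData) {
    this._heuristicConfData = heuristicConfData;
  }

  @Override
  public HeuristicConfigurationData getHeuristicConfData() {
    return _heuristicConfData;
  }

  @Override
  public HeuristicResult apply(MapReduceApplicationData data) {
    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
        _heuristicConfData.getHeuristicName(), Severity.NONE, 0);
    result.addResultDetail("Status", "Nothing to report");
    return result;
  }
}
```
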
diff --git a/app/com/linkedin/drelephant/analysis/HeuristicResult.java b/app/com/linkedin/drelephant/analysis/HeuristicResult.java
index 558cc24d6..ad2aff499 100644
--- a/app/com/linkedin/drelephant/analysis/HeuristicResult.java
+++ b/app/com/linkedin/drelephant/analysis/HeuristicResult.java
@@ -27,32 +27,46 @@
  * Holds the Heuristic analysis result Information
  */
 public class HeuristicResult {
-  public static final HeuristicResult NO_DATA = new HeuristicResult("No Data Received", Severity.LOW);
+  public static final HeuristicResult NO_DATA = new HeuristicResult("NoDataReceived", "No Data Received", Severity.LOW, 0);
 
-  private String _analysis;
+  private String _heuristicClass;
+  private String _heuristicName;
   private Severity _severity;
-  private List<String> _details;
-  private int _detailsColumns = 0;
+  private int _score;
+  private List<HeuristicResultDetails> _heuristicResultDetails;
 
   /**
    * Heuristic Result Constructor
    *
-   * @param analysis The name of the heuristic
-   * @param severity The severity level of the heuristic
+   * @param heuristicClass The Heuristic class
+   * @param heuristicName The name of the Heuristic
+   * @param severity The severity of the result
+   * @param score The computed score
    */
-  public HeuristicResult(String analysis, Severity severity) {
-    this._analysis = analysis;
+  public HeuristicResult(String heuristicClass, String heuristicName, Severity severity, int score) {
+    this._heuristicClass = heuristicClass;
+    this._heuristicName = heuristicName;
     this._severity = severity;
-    this._details = new ArrayList<String>();
+    this._score = score;
+    this._heuristicResultDetails = new ArrayList<HeuristicResultDetails>();
+  }
+
+  /**
+   * Returns the heuristic analyser class name
+   *
+   * @return the heuristic class name
+   */
+  public String getHeuristicClassName() {
+    return _heuristicClass;
   }
 
   /**
    * Returns the heuristic analyser name
    *
-   * @return the analysis name
+   * @return the heuristic name
    */
-  public String getAnalysis() {
-    return _analysis;
+  public String getHeuristicName() {
+    return _heuristicName;
   }
 
   /**
@@ -64,43 +78,31 @@ public Severity getSeverity() {
     return _severity;
   }
 
-  /**
-   * Gets a list of lines of comma-separated strings
-   *
-   * @return
-   */
-  public List<String> getDetails() {
-    return _details;
+  public int getScore() {
+    return _score;
   }
 
   /**
-   * Create a string that contains lines of comma-separated strings
+   * Gets a list of HeuristicResultDetails
    *
    * @return
    */
-  public String getDetailsCSV() {
-    return Utils.combineCsvLines(_details.toArray(new String[_details.size()]));
+  public List<HeuristicResultDetails> getHeuristicResultDetails() {
+    return _heuristicResultDetails;
   }
 
   /**
-   * Gets the number of columns in the csv formatted details store
-   *
-   * @return
+   * Add an App Heuristic Result Detail entry
    */
-  public int getDetailsColumns() {
-    return _detailsColumns;
+  public void addResultDetail(String name, String value, String details) {
+    _heuristicResultDetails.add(new HeuristicResultDetails(name, value, details));
   }
 
   /**
-   * Add a new line to the csv formatted details store
-   *
-   * @param parts strings to join into a single line
+   * Add an App Heuristic Result Detail entry without the details field
    */
-  public void addDetail(String... parts) {
-    _details.add(Utils.createCsvLine(parts));
-    if (parts.length > _detailsColumns) {
-      _detailsColumns = parts.length;
-    }
+  public void addResultDetail(String name, String value) {
+    _heuristicResultDetails.add(new HeuristicResultDetails(name, value, null));
   }
 
   /**
@@ -114,7 +116,7 @@ public void setSeverity(Severity severity) {
 
   @Override
   public String toString() {
-    return "{analysis: " + _analysis + ", severity: " + _severity + ", details: [" + StringUtils.join(_details, "    ")
-        + "]}";
+    return "{analysis: " + _heuristicClass + ", severity: " + _severity + ", details: ["
+        + StringUtils.join(_heuristicResultDetails, "    ") + "]}";
   }
 }
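
The net effect of the refactor: a result is no longer a bag of CSV lines with a column count, but a typed list of (name, value, details) entries plus an integer score. A quick sketch of the new surface (the class name, severity, and score values are illustrative):

```java
// Sketch of the refactored HeuristicResult API; all literal values are illustrative.
HeuristicResult result = new HeuristicResult(
    "com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic",  // heuristic class
    "Mapper GC",                                                       // display name
    Severity.MODERATE,
    42);                                                               // sample score
result.addResultDetail("Number of tasks", "100");  // two-arg form: details defaults to null
result.addResultDetail("Error", "Stacktrace", "java.io.IOException: broken pipe");
for (HeuristicResultDetails d : result.getHeuristicResultDetails()) {
  System.out.println(d.getName() + " = " + d.getValue());
}
```
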
diff --git a/app/com/linkedin/drelephant/analysis/HeuristicResultDetails.java b/app/com/linkedin/drelephant/analysis/HeuristicResultDetails.java
new file mode 100644
index 000000000..5503f4931
--- /dev/null
+++ b/app/com/linkedin/drelephant/analysis/HeuristicResultDetails.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.analysis;
+
+
+
+/**
+ * Holds the analysis details for each Heuristic
+ */
+public class HeuristicResultDetails {
+
+  private String _name;
+  private String _value;
+  private String _details;
+
+  public HeuristicResultDetails(String name, String value, String details) {
+    this._name = name;
+    this._value = value;
+    this._details = details;
+  }
+
+  public String getDetails() {
+    return _details;
+  }
+
+  public String getValue() {
+    return _value;
+  }
+
+  public String getName() {
+    return _name;
+  }
+}
diff --git a/app/com/linkedin/drelephant/configurations/jobtype/JobTypeConf.java b/app/com/linkedin/drelephant/configurations/jobtype/JobTypeConfiguration.java
similarity index 93%
rename from app/com/linkedin/drelephant/configurations/jobtype/JobTypeConf.java
rename to app/com/linkedin/drelephant/configurations/jobtype/JobTypeConfiguration.java
index b55f85976..21e61b5bf 100644
--- a/app/com/linkedin/drelephant/configurations/jobtype/JobTypeConf.java
+++ b/app/com/linkedin/drelephant/configurations/jobtype/JobTypeConfiguration.java
@@ -36,36 +36,32 @@
 /**
  * This class manages the job type configurations
  */
-public class JobTypeConf {
-  private static final Logger logger = Logger.getLogger(JobTypeConf.class);
+public class JobTypeConfiguration {
+  private static final Logger logger = Logger.getLogger(JobTypeConfiguration.class);
   private static final int TYPE_LEN_LIMIT = 20;
-  private final String _configFilePath;
 
   private Map<ApplicationType, List<JobType>> _appTypeToJobTypeList = new HashMap<ApplicationType, List<JobType>>();
 
-  public JobTypeConf(String configFilePath) {
-    _configFilePath = configFilePath;
-    parseJobTypeConf();
+  public JobTypeConfiguration(Element configuration) {
+    parseJobTypeConfiguration(configuration);
   }
 
   public Map<ApplicationType, List<JobType>> getAppTypeToJobTypeList() {
     return _appTypeToJobTypeList;
   }
 
-  private void parseJobTypeConf() {
-    logger.info("Loading job type config file " + _configFilePath);
+  private void parseJobTypeConfiguration(Element configuration) {
 
     Map<ApplicationType, JobType> defaultMap = new HashMap<ApplicationType, JobType>();
 
-    Document document = Utils.loadXMLDoc(_configFilePath);
-
-    NodeList nodes = document.getDocumentElement().getChildNodes();
+    NodeList nodes = configuration.getChildNodes();
+    int n = 0;
     for (int i = 0; i < nodes.getLength(); i++) {
       Node node = nodes.item(i);
-      int n = 0;
       if (node.getNodeType() == Node.ELEMENT_NODE) {
         n++;
         Element jobTypeNode = (Element) node;
+
         String jobTypeName;
         Node jobTypeNameNode = jobTypeNode.getElementsByTagName("name").item(0);
         if (jobTypeNameNode == null) {
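
Worth noting about this rename: the constructor now receives an already-parsed `Element` instead of a file path (the caller in `ElephantContext.loadJobTypes()` does the `Utils.loadXMLDoc` step), which decouples XML I/O from parsing and lets tests feed the class an in-memory document. A sketch under those assumptions; the `<name>` tag is the one read above, while the other tags and the job-type entry follow the shape of JobTypeConf.xml and assume the referenced application type resolves in this context:

```java
import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import com.linkedin.drelephant.configurations.jobtype.JobTypeConfiguration;

// Sketch: build a JobTypeConfiguration from an in-memory DOM instead of a file.
class JobTypeConfigurationSketch {

  static JobTypeConfiguration fromXmlString(String xml) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
        .parse(new ByteArrayInputStream(xml.getBytes(StandardCharsets.UTF_8)));
    return new JobTypeConfiguration(doc.getDocumentElement());
  }

  public static void main(String[] args) throws Exception {
    String xml = "<job-types>"
        + "<jobType>"
        + "<name>Pig</name>"
        + "<applicationtype>mapreduce</applicationtype>"
        + "<conf>pig.script</conf>"
        + "</jobType>"
        + "</job-types>";
    System.out.println(fromXmlString(xml).getAppTypeToJobTypeList());
  }
}
```
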
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/ExceptionHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/ExceptionHeuristic.java
index d23ed730b..b5cb7e52f 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/ExceptionHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/ExceptionHeuristic.java
@@ -25,7 +25,6 @@
 
 public class ExceptionHeuristic implements Heuristic<MapReduceApplicationData> {
 
-  public static final String HEURISTIC_NAME = "Exception";
   private HeuristicConfigurationData _heuristicConfData;
 
   public ExceptionHeuristic(HeuristicConfigurationData heuristicConfData) {
@@ -33,8 +32,8 @@ public ExceptionHeuristic(HeuristicConfigurationData heuristicConfData) {
   }
 
   @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
   }
 
   @Override
@@ -42,14 +41,15 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     if (data.getSucceeded()) {
       return null;
     }
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, Severity.MODERATE);
+    HeuristicResult result = new HeuristicResult(
+        _heuristicConfData.getClassName(), _heuristicConfData.getHeuristicName(), Severity.MODERATE, 0);
     String diagnosticInfo = data.getDiagnosticInfo();
     if (diagnosticInfo != null) {
-      result.addDetail(diagnosticInfo);
+      result.addResultDetail("Error", "Stacktrace", diagnosticInfo);
     } else {
-      String msg = "Unable to find stacktrace info. Please find the real problem in the Jobhistory link above.\n"
+      String msg = "Unable to find stacktrace info. Please find the real problem in the Jobhistory link above."
           + "Exception can happen either in task log or Application Master log.";
-      result.addDetail(msg);
+      result.addResultDetail("Error", msg);
     }
     return result;
   }
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/GenericDataSkewHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/GenericDataSkewHeuristic.java
index 127b8ccb6..2c75e8bb0 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/GenericDataSkewHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/GenericDataSkewHeuristic.java
@@ -54,59 +54,52 @@ public abstract class GenericDataSkewHeuristic implements Heuristic<MapReduceApp
   private double[] filesLimits = {1d/8, 1d/4, 1d/2, 1d};  // Fraction of HDFS Block Size
 
   private MapReduceCounterData.CounterName _counterName;
-  private String _heuristicName;
   private HeuristicConfigurationData _heuristicConfData;
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(NUM_TASKS_SEVERITY) != null) {
-      double[] confNumTasksThreshold = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
-      if (confNumTasksThreshold != null) {
-        numTasksLimits = confNumTasksThreshold;
-      }
+    double[] confNumTasksThreshold = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
+    if (confNumTasksThreshold != null) {
+      numTasksLimits = confNumTasksThreshold;
     }
-    logger.info(_heuristicName + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(numTasksLimits));
 
-    if(paramMap.get(DEVIATION_SEVERITY) != null) {
-      double[] confDeviationThreshold = Utils.getParam(paramMap.get(DEVIATION_SEVERITY), deviationLimits.length);
-      if (confDeviationThreshold != null) {
-        deviationLimits = confDeviationThreshold;
-      }
+    double[] confDeviationThreshold = Utils.getParam(paramMap.get(DEVIATION_SEVERITY), deviationLimits.length);
+    if (confDeviationThreshold != null) {
+      deviationLimits = confDeviationThreshold;
     }
-    logger.info(_heuristicName + " will use " + DEVIATION_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + DEVIATION_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(deviationLimits));
 
-    if(paramMap.get(FILES_SEVERITY) != null) {
-      double[] confFilesThreshold = Utils.getParam(paramMap.get(FILES_SEVERITY), filesLimits.length);
-      if (confFilesThreshold != null) {
-        filesLimits = confFilesThreshold;
-      }
+    double[] confFilesThreshold = Utils.getParam(paramMap.get(FILES_SEVERITY), filesLimits.length);
+    if (confFilesThreshold != null) {
+      filesLimits = confFilesThreshold;
     }
-    logger.info(_heuristicName + " will use " + FILES_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + FILES_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(filesLimits));
     for (int i = 0; i < filesLimits.length; i++) {
       filesLimits[i] = filesLimits[i] * HDFSContext.HDFS_BLOCK_SIZE;
     }
   }
 
-  protected GenericDataSkewHeuristic(MapReduceCounterData.CounterName counterName, String heuristicName,
+  protected GenericDataSkewHeuristic(MapReduceCounterData.CounterName counterName,
       HeuristicConfigurationData heuristicConfData) {
     this._counterName = counterName;
-    this._heuristicName = heuristicName;
     this._heuristicConfData = heuristicConfData;
 
     loadParameters();
   }
 
+  protected abstract MapReduceTaskData[] getTasks(MapReduceApplicationData data);
+
   @Override
-  public String getHeuristicName() {
-    return _heuristicName;
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
   }
 
-  protected abstract MapReduceTaskData[] getTasks(MapReduceApplicationData data);
-
   @Override
   public HeuristicResult apply(MapReduceApplicationData data) {
 
@@ -145,11 +138,12 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     severity = Severity.min(severity, Severity.getSeverityAscending(
         groups[0].length, numTasksLimits[0], numTasksLimits[1], numTasksLimits[2], numTasksLimits[3]));
 
-    HeuristicResult result = new HeuristicResult(_heuristicName, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Group A", Math.round(groups[0].length*scale) + " tasks @ " + FileUtils.byteCountToDisplaySize(avg1) + " avg");
-    result.addDetail("Group B", Math.round(groups[1].length*scale) + " tasks @ " + FileUtils.byteCountToDisplaySize(avg2) + " avg");
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Group A", groups[0].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg1) + " avg");
+    result.addResultDetail("Group B", groups[1].length + " tasks @ " + FileUtils.byteCountToDisplaySize(avg2) + " avg");
 
     return result;
   }
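
Every `apply()` in this patch now passes `Utils.getHeuristicScore(severity, tasks.length)` into the result. That helper is not part of this hunk; judging by its call sites, it maps a severity and a task count to an integer so that larger jobs with worse severities accumulate larger scores. A plausible sketch of such a function, not the project's actual formula:

```java
// Hypothetical sketch only: the real formula lives in
// com.linkedin.drelephant.util.Utils.getHeuristicScore and is not part of
// this hunk. Assumes Severity exposes an integer weight via getValue().
static int heuristicScore(Severity severity, int numTasks) {
  if (severity.getValue() <= Severity.LOW.getValue()) {
    return 0;                            // mild results contribute no score
  }
  return severity.getValue() * numTasks; // penalty scales with job size
}
```
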
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/GenericGCHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/GenericGCHeuristic.java
index 0723c68fc..8513c0586 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/GenericGCHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/GenericGCHeuristic.java
@@ -46,41 +46,31 @@ public abstract class GenericGCHeuristic implements Heuristic<MapReduceApplicati
   private double[] gcRatioLimits = {0.01d, 0.02d, 0.03d, 0.04d};   // Garbage Collection Time / CPU Time
   private double[] runtimeLimits = {5, 10, 12, 15};                // Task Runtime in minutes, converted to ms below
 
-  private String _heuristicName;
   private HeuristicConfigurationData _heuristicConfData;
 
-  @Override
-  public String getHeuristicName() {
-    return _heuristicName;
-  }
-
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(GC_RATIO_SEVERITY) != null) {
-      double[] confGcRatioThreshold = Utils.getParam(paramMap.get(GC_RATIO_SEVERITY), gcRatioLimits.length);
-      if (confGcRatioThreshold != null) {
-        gcRatioLimits = confGcRatioThreshold;
-      }
+    double[] confGcRatioThreshold = Utils.getParam(paramMap.get(GC_RATIO_SEVERITY), gcRatioLimits.length);
+    if (confGcRatioThreshold != null) {
+      gcRatioLimits = confGcRatioThreshold;
     }
-    logger.info(_heuristicName + " will use " + GC_RATIO_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + GC_RATIO_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(gcRatioLimits));
 
-    if(paramMap.get(RUNTIME_SEVERITY) != null) {
-      double[] confRuntimeThreshold = Utils.getParam(paramMap.get(RUNTIME_SEVERITY), runtimeLimits.length);
-      if (confRuntimeThreshold != null) {
-        runtimeLimits = confRuntimeThreshold;
-      }
+    double[] confRuntimeThreshold = Utils.getParam(paramMap.get(RUNTIME_SEVERITY), runtimeLimits.length);
+    if (confRuntimeThreshold != null) {
+      runtimeLimits = confRuntimeThreshold;
     }
-    logger.info(_heuristicName + " will use " + RUNTIME_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + RUNTIME_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(runtimeLimits));
     for (int i = 0; i < runtimeLimits.length; i++) {
       runtimeLimits[i] = runtimeLimits[i] * Statistics.MINUTE_IN_MS;
     }
   }
 
-  protected GenericGCHeuristic(String heuristicName, HeuristicConfigurationData heuristicConfData) {
-    this._heuristicName = heuristicName;
+  protected GenericGCHeuristic(HeuristicConfigurationData heuristicConfData) {
     this._heuristicConfData = heuristicConfData;
 
     loadParameters();
@@ -88,6 +78,11 @@ protected GenericGCHeuristic(String heuristicName, HeuristicConfigurationData he
 
   protected abstract MapReduceTaskData[] getTasks(MapReduceApplicationData data);
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(MapReduceApplicationData data) {
 
@@ -120,13 +115,14 @@ public HeuristicResult apply(MapReduceApplicationData data) {
       severity = getGcRatioSeverity(avgRuntimeMs, avgCpuMs, avgGcMs);
     }
 
-    HeuristicResult result = new HeuristicResult(_heuristicName, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Avg task runtime (ms)", Long.toString(avgRuntimeMs));
-    result.addDetail("Avg task CPU time (ms)", Long.toString(avgCpuMs));
-    result.addDetail("Avg task GC time (ms)", Long.toString(avgGcMs));
-    result.addDetail("Task GC/CPU ratio", Double.toString(ratio));
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Avg task runtime (ms)", Long.toString(avgRuntimeMs));
+    result.addResultDetail("Avg task CPU time (ms)", Long.toString(avgCpuMs));
+    result.addResultDetail("Avg task GC time (ms)", Long.toString(avgGcMs));
+    result.addResultDetail("Task GC/CPU ratio", Double.toString(ratio));
     return result;
   }
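
A pattern repeated across these heuristics: the explicit `paramMap.get(KEY) != null` guards were dropped, so `Utils.getParam` must itself tolerate a missing or malformed value by returning `null`, leaving the hard-coded default limits in place. A plausible sketch of that contract (the real helper lives in `com.linkedin.drelephant.util.Utils`):

```java
// Hypothetical sketch of the Utils.getParam contract implied by the dropped
// null-guards: return null (keep defaults) unless the raw value parses into
// exactly the expected number of doubles.
static double[] getParam(String rawValue, int expectedLength) {
  if (rawValue == null) {
    return null;                       // key absent: keep defaults
  }
  String[] parts = rawValue.split(",");
  if (parts.length != expectedLength) {
    return null;                       // malformed: keep defaults
  }
  double[] limits = new double[expectedLength];
  for (int i = 0; i < expectedLength; i++) {
    try {
      limits[i] = Double.parseDouble(parts[i].trim());
    } catch (NumberFormatException e) {
      return null;
    }
  }
  return limits;
}
```
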
 
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/GenericMemoryHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/GenericMemoryHeuristic.java
index 9d0622459..c7dabeecc 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/GenericMemoryHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/GenericMemoryHeuristic.java
@@ -50,42 +50,31 @@ public abstract class GenericMemoryHeuristic implements Heuristic<MapReduceAppli
   private double[] memoryLimits = {1.1d, 1.5d, 2.0d, 2.5d};   // Container Memory Severity Limits
 
   private String _containerMemConf;
-  private String _heuristicName;
   private HeuristicConfigurationData _heuristicConfData;
 
-  @Override
-  public String getHeuristicName() {
-    return _heuristicName;
-  }
-
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(MEM_RATIO_SEVERITY) != null) {
-      double[] confMemRatioLimits = Utils.getParam(paramMap.get(MEM_RATIO_SEVERITY), memRatioLimits.length);
-      if (confMemRatioLimits != null) {
-        memRatioLimits = confMemRatioLimits;
-      }
+    double[] confMemRatioLimits = Utils.getParam(paramMap.get(MEM_RATIO_SEVERITY), memRatioLimits.length);
+    if (confMemRatioLimits != null) {
+      memRatioLimits = confMemRatioLimits;
     }
-    logger.info(_heuristicName + " will use " + MEM_RATIO_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + MEM_RATIO_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(memRatioLimits));
 
-    if(paramMap.get(CONTAINER_MEM_SEVERITY) != null) {
-      double[] confMemoryLimits = Utils.getParam(paramMap.get(CONTAINER_MEM_SEVERITY), memoryLimits.length);
-      if (confMemoryLimits != null) {
-        memoryLimits = confMemoryLimits;
-      }
+    double[] confMemoryLimits = Utils.getParam(paramMap.get(CONTAINER_MEM_SEVERITY), memoryLimits.length);
+    if (confMemoryLimits != null) {
+      memoryLimits = confMemoryLimits;
     }
-    logger.info(_heuristicName + " will use " + CONTAINER_MEM_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + CONTAINER_MEM_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(memoryLimits));
     for (int i = 0; i < memoryLimits.length; i++) {
       memoryLimits[i] = memoryLimits[i] * CONTAINER_MEMORY_DEFAULT_BYTES;
     }
   }
 
-  protected GenericMemoryHeuristic(String containerMemConf, String heuristicName,
-      HeuristicConfigurationData heuristicConfData) {
-    this._heuristicName = heuristicName;
+  protected GenericMemoryHeuristic(String containerMemConf, HeuristicConfigurationData heuristicConfData) {
     this._containerMemConf = containerMemConf;
     this._heuristicConfData = heuristicConfData;
 
@@ -94,6 +83,11 @@ protected GenericMemoryHeuristic(String containerMemConf, String heuristicName,
 
   protected abstract MapReduceTaskData[] getTasks(MapReduceApplicationData data);
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(MapReduceApplicationData data) {
 
@@ -102,6 +96,9 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     }
 
     String containerSizeStr = data.getConf().getProperty(_containerMemConf);
+    if (containerSizeStr == null) {
+      return null;
+    }
 
     long containerMem;
     try {
@@ -151,15 +148,16 @@ public HeuristicResult apply(MapReduceApplicationData data) {
       severity = getTaskMemoryUtilSeverity(taskPMemAvg, containerMem);
     }
 
-    HeuristicResult result = new HeuristicResult(_heuristicName, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Avg task runtime", Statistics.readableTimespan(averageTimeMs));
-    result.addDetail("Avg Physical Memory (MB)", Long.toString(taskPMemAvg/FileUtils.ONE_MB));
-    result.addDetail("Max Physical Memory (MB)", Long.toString(taskPMax/FileUtils.ONE_MB));
-    result.addDetail("Min Physical Memory (MB)", Long.toString(taskPMin/FileUtils.ONE_MB));
-    result.addDetail("Avg Virtual Memory (MB)", Long.toString(taskVMemAvg/FileUtils.ONE_MB));
-    result.addDetail("Requested Container Memory", FileUtils.byteCountToDisplaySize(containerMem));
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Avg task runtime", Statistics.readableTimespan(averageTimeMs));
+    result.addResultDetail("Avg Physical Memory (MB)", Long.toString(taskPMemAvg / FileUtils.ONE_MB));
+    result.addResultDetail("Max Physical Memory (MB)", Long.toString(taskPMax / FileUtils.ONE_MB));
+    result.addResultDetail("Min Physical Memory (MB)", Long.toString(taskPMin / FileUtils.ONE_MB));
+    result.addResultDetail("Avg Virtual Memory (MB)", Long.toString(taskVMemAvg / FileUtils.ONE_MB));
+    result.addResultDetail("Requested Container Memory", FileUtils.byteCountToDisplaySize(containerMem));
 
     return result;
   }
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/JobQueueLimitHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/JobQueueLimitHeuristic.java
index 2e715b8ed..0ca8c0ce8 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/JobQueueLimitHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/JobQueueLimitHeuristic.java
@@ -28,22 +28,23 @@
 
 
 public class JobQueueLimitHeuristic implements Heuristic<MapReduceApplicationData> {
-  public static final String HEURISTIC_NAME = "Queue Time Limit";
 
   private HeuristicConfigurationData _heuristicConfData;
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
-
   protected JobQueueLimitHeuristic(HeuristicConfigurationData heuristicConfData) {
     this._heuristicConfData = heuristicConfData;
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(MapReduceApplicationData data) {
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, Severity.NONE);
+
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), Severity.NONE, 0);
     Properties jobConf = data.getConf();
     long queueTimeoutLimitMs = TimeUnit.MINUTES.toMillis(15);
 
@@ -59,29 +60,29 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     Severity[] mapTasksSeverity = new Severity[mapTasks.length];
     Severity[] redTasksSeverity = new Severity[redTasks.length];
     if (queueName.equals("default")) {
-      result.addDetail("Queue: ", queueName);
-      result.addDetail("Number of Map tasks", Integer.toString(mapTasks.length));
-      result.addDetail("Number of Reduce tasks", Integer.toString(redTasks.length));
+      result.addResultDetail("Queue: ", queueName, null);
+      result.addResultDetail("Number of Map tasks", Integer.toString(mapTasks.length));
+      result.addResultDetail("Number of Reduce tasks", Integer.toString(redTasks.length));
 
       // Calculate Severity of Mappers
       mapTasksSeverity = getTasksSeverity(mapTasks, queueTimeoutLimitMs);
-      result.addDetail("Number of Map tasks that are in severe state (14 to 14.5 min)",
+      result.addResultDetail("Number of Map tasks that are in severe state (14 to 14.5 min)",
           Long.toString(getSeverityFrequency(Severity.SEVERE, mapTasksSeverity)));
-      result.addDetail("Number of Map tasks that are in critical state (over 14.5 min)",
+      result.addResultDetail("Number of Map tasks that are in critical state (over 14.5 min)",
           Long.toString(getSeverityFrequency(Severity.CRITICAL, mapTasksSeverity)));
 
       // Calculate Severity of Reducers
       redTasksSeverity = getTasksSeverity(redTasks, queueTimeoutLimitMs);
-      result.addDetail("Number of Reduce tasks that are in severe state (14 to 14.5 min)",
+      result.addResultDetail("Number of Reduce tasks that are in severe state (14 to 14.5 min)",
           Long.toString(getSeverityFrequency(Severity.SEVERE, redTasksSeverity)));
-      result.addDetail("Number of Reduce tasks that are in critical state (over 14.5 min)",
+      result.addResultDetail("Number of Reduce tasks that are in critical state (over 14.5 min)",
           Long.toString(getSeverityFrequency(Severity.CRITICAL, redTasksSeverity)));
 
       // Calculate Job severity
       result.setSeverity(Severity.max(Severity.max(mapTasksSeverity), Severity.max(redTasksSeverity)));
 
     } else {
-      result.addDetail("This Heuristic is not applicable to " + queueName + " queue");
+      result.addResultDetail("Not Applicable", "This Heuristic is not applicable to " + queueName + " queue");
       result.setSeverity(Severity.NONE);
     }
     return result;
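
The thresholds in the detail labels above encode the heuristic's premise: tasks on the default queue run against a 15-minute execution cap (`queueTimeoutLimitMs`), so a task is flagged SEVERE in its last minute under the cap and CRITICAL in its last 30 seconds. A per-task sketch that restates those labels (the class's actual `getTasksSeverity` is not shown in this hunk):

```java
// Sketch restating the thresholds from the detail strings above; the real
// per-task logic lives in getTasksSeverity.
static Severity queueTimeSeverity(long taskRuntimeMs, long queueTimeoutLimitMs) {
  if (taskRuntimeMs > queueTimeoutLimitMs - 30 * 1000L) {
    return Severity.CRITICAL;  // over 14.5 min against a 15 min cap
  }
  if (taskRuntimeMs > queueTimeoutLimitMs - 60 * 1000L) {
    return Severity.SEVERE;    // between 14 and 14.5 min
  }
  return Severity.NONE;
}
```
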
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperDataSkewHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperDataSkewHeuristic.java
index 800046847..e598b6807 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperDataSkewHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperDataSkewHeuristic.java
@@ -26,10 +26,9 @@
  * This Heuristic analyses the skewness in the mapper input data
  */
 public class MapperDataSkewHeuristic extends GenericDataSkewHeuristic {
-  public static final String HEURISTIC_NAME = "Mapper Data Skew";
 
   public MapperDataSkewHeuristic(HeuristicConfigurationData heuristicConfData) {
-    super(MapReduceCounterData.CounterName.HDFS_BYTES_READ, HEURISTIC_NAME, heuristicConfData);
+    super(MapReduceCounterData.CounterName.HDFS_BYTES_READ, heuristicConfData);
   }
 
   @Override
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperGCHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperGCHeuristic.java
index 2736e860f..e57320128 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperGCHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperGCHeuristic.java
@@ -22,15 +22,9 @@
 
 
 public class MapperGCHeuristic extends GenericGCHeuristic {
-  public static final String HEURISTIC_NAME = "Mapper GC";
 
   public MapperGCHeuristic(HeuristicConfigurationData heuristicConfData) {
-    super(HEURISTIC_NAME, heuristicConfData);
-  }
-
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+    super(heuristicConfData);
   }
 
   @Override
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperMemoryHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperMemoryHeuristic.java
index 741c094d4..b973b7450 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperMemoryHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperMemoryHeuristic.java
@@ -22,16 +22,10 @@
 
 
 public class MapperMemoryHeuristic extends GenericMemoryHeuristic {
-  public static final String HEURISTIC_NAME = "Mapper Memory";
   public static final String MAPPER_MEMORY_CONF = "mapreduce.map.memory.mb";
 
   public MapperMemoryHeuristic(HeuristicConfigurationData _heuristicConfData) {
-    super(MAPPER_MEMORY_CONF, HEURISTIC_NAME, _heuristicConfData);
-  }
-
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+    super(MAPPER_MEMORY_CONF, _heuristicConfData);
   }
 
   @Override
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpeedHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpeedHeuristic.java
index 2b9c1c820..6cdd1abf5 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpeedHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpeedHeuristic.java
@@ -39,7 +39,6 @@
 
 public class MapperSpeedHeuristic implements Heuristic<MapReduceApplicationData> {
   private static final Logger logger = Logger.getLogger(MapperSpeedHeuristic.class);
-  public static final String HEURISTIC_NAME = "Mapper Speed";
 
   // Severity parameters.
   private static final String DISK_SPEED_SEVERITY = "disk_speed_severity";
@@ -53,26 +52,23 @@ public class MapperSpeedHeuristic implements Heuristic<MapReduceApplicationData>
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(DISK_SPEED_SEVERITY) != null) {
-      double[] confDiskSpeedThreshold = Utils.getParam(paramMap.get(DISK_SPEED_SEVERITY), diskSpeedLimits.length);
-      if (confDiskSpeedThreshold != null) {
-        diskSpeedLimits = confDiskSpeedThreshold;
-      }
+    double[] confDiskSpeedThreshold = Utils.getParam(paramMap.get(DISK_SPEED_SEVERITY), diskSpeedLimits.length);
+    if (confDiskSpeedThreshold != null) {
+      diskSpeedLimits = confDiskSpeedThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + DISK_SPEED_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + DISK_SPEED_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(diskSpeedLimits));
     for (int i = 0; i < diskSpeedLimits.length; i++) {
       diskSpeedLimits[i] = diskSpeedLimits[i] * HDFSContext.HDFS_BLOCK_SIZE;
     }
 
-    if(paramMap.get(RUNTIME_SEVERITY) != null) {
-      double[] confRuntimeThreshold = Utils.getParam(paramMap.get(RUNTIME_SEVERITY), runtimeLimits.length);
-      if (confRuntimeThreshold != null) {
-        runtimeLimits = confRuntimeThreshold;
-      }
+    double[] confRuntimeThreshold = Utils.getParam(paramMap.get(RUNTIME_SEVERITY), runtimeLimits.length);
+    if (confRuntimeThreshold != null) {
+      runtimeLimits = confRuntimeThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + RUNTIME_SEVERITY + " with the following threshold settings: " + Arrays
+    logger.info(heuristicName + " will use " + RUNTIME_SEVERITY + " with the following threshold settings: " + Arrays
         .toString(runtimeLimits));
     for (int i = 0; i < runtimeLimits.length; i++) {
       runtimeLimits[i] = runtimeLimits[i] * Statistics.MINUTE_IN_MS;
@@ -85,8 +81,8 @@ public MapperSpeedHeuristic(HeuristicConfigurationData heuristicConfData) {
   }
 
   @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
   }
 
   @Override
@@ -133,12 +129,13 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     //This reduces severity if task runtime is insignificant
     severity = Severity.min(severity, getRuntimeSeverity(medianRuntimeMs));
 
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Median task input size", FileUtils.byteCountToDisplaySize(medianSize));
-    result.addDetail("Median task runtime", Statistics.readableTimespan(medianRuntimeMs));
-    result.addDetail("Median task speed", FileUtils.byteCountToDisplaySize(medianSpeed) + "/s");
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Median task input size", FileUtils.byteCountToDisplaySize(medianSize));
+    result.addResultDetail("Median task runtime", Statistics.readableTimespan(medianRuntimeMs));
+    result.addResultDetail("Median task speed", FileUtils.byteCountToDisplaySize(medianSpeed) + "/s");
 
     return result;
   }
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpillHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpillHeuristic.java
index ea5fc9ec5..54c875734 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpillHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperSpillHeuristic.java
@@ -31,7 +31,6 @@
 
 public class MapperSpillHeuristic implements Heuristic<MapReduceApplicationData> {
   private static final Logger logger = Logger.getLogger(MapperSpillHeuristic.class);
-  public static final String HEURISTIC_NAME = "Mapper Spill";
   private static final long THRESHOLD_SPILL_FACTOR = 10000;
 
   // Severity parameters.
@@ -46,24 +45,21 @@ public class MapperSpillHeuristic implements Heuristic<MapReduceApplicationData>
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(NUM_TASKS_SEVERITY) != null) {
-      double[] confNumTasksThreshold = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
-      if (confNumTasksThreshold != null) {
-        numTasksLimits = confNumTasksThreshold;
-      }
+    double[] confNumTasksThreshold = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
+    if (confNumTasksThreshold != null) {
+      numTasksLimits = confNumTasksThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(numTasksLimits));
 
-    if(paramMap.get(SPILL_SEVERITY) != null) {
-      double[] confSpillThreshold = Utils.getParam(paramMap.get(SPILL_SEVERITY), spillLimits.length);
-      if (confSpillThreshold != null) {
-        spillLimits = confSpillThreshold;
-      }
+    double[] confSpillThreshold = Utils.getParam(paramMap.get(SPILL_SEVERITY), spillLimits.length);
+    if (confSpillThreshold != null) {
+      spillLimits = confSpillThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + SPILL_SEVERITY + " with the following threshold settings: "
-        + Arrays.toString(spillLimits));
+    logger.info(heuristicName + " will use " + SPILL_SEVERITY + " with the following threshold settings: " + Arrays
+        .toString(spillLimits));
     for (int i = 0; i < spillLimits.length; i++) {
       spillLimits[i] = spillLimits[i] * THRESHOLD_SPILL_FACTOR;
     }
@@ -74,6 +70,11 @@ public MapperSpillHeuristic(HeuristicConfigurationData heuristicConfData) {
     loadParameters();
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(MapReduceApplicationData data) {
 
@@ -108,23 +109,20 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     Severity taskSeverity = getNumTasksSeverity(tasks.length);
     severity =  Severity.min(severity, taskSeverity);
 
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Avg spilled records per task", tasks.length == 0 ? "0" : Long.toString(totalSpills/tasks.length));
-    result.addDetail(
-        "Avg output records per task", tasks.length == 0 ? "0" : Long.toString(totalOutputRecords/tasks.length));
-    result.addDetail("Ratio of spilled records to output records", Double.toString(ratioSpills));
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Avg spilled records per task",
+        tasks.length == 0 ? "0" : Long.toString(totalSpills / tasks.length));
+    result.addResultDetail("Avg output records per task",
+        tasks.length == 0 ? "0" : Long.toString(totalOutputRecords / tasks.length));
+    result.addResultDetail("Ratio of spilled records to output records", Double.toString(ratioSpills));
 
     return result;
 
   }
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
-
   private Severity getSpillSeverity(double ratioSpills) {
 
     long normalizedSpillRatio = 0;
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperTimeHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperTimeHeuristic.java
index 3c99d5f33..2b7f32564 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/MapperTimeHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/MapperTimeHeuristic.java
@@ -37,7 +37,6 @@
 
 public class MapperTimeHeuristic implements Heuristic<MapReduceApplicationData> {
   private static final Logger logger = Logger.getLogger(MapperTimeHeuristic.class);
-  public static final String HEURISTIC_NAME = "Mapper Time";
 
   // Severity parameters.
   private static final String SHORT_RUNTIME_SEVERITY = "short_runtime_severity_in_min";
@@ -53,39 +52,34 @@ public class MapperTimeHeuristic implements Heuristic<MapReduceApplicationData>
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(SHORT_RUNTIME_SEVERITY) != null) {
-      double[] confShortThreshold = Utils.getParam(paramMap.get(SHORT_RUNTIME_SEVERITY), shortRuntimeLimits.length);
-      if (confShortThreshold != null) {
-        shortRuntimeLimits = confShortThreshold;
-      }
+    double[] confShortThreshold = Utils.getParam(paramMap.get(SHORT_RUNTIME_SEVERITY), shortRuntimeLimits.length);
+    if (confShortThreshold != null) {
+      shortRuntimeLimits = confShortThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + SHORT_RUNTIME_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + SHORT_RUNTIME_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(shortRuntimeLimits));
     for (int i = 0; i < shortRuntimeLimits.length; i++) {
       shortRuntimeLimits[i] = shortRuntimeLimits[i] * Statistics.MINUTE_IN_MS;
     }
 
-    if(paramMap.get(LONG_RUNTIME_SEVERITY) != null) {
-      double[] confLongThreshold = Utils.getParam(paramMap.get(LONG_RUNTIME_SEVERITY), longRuntimeLimits.length);
-      if (confLongThreshold != null) {
-        longRuntimeLimits = confLongThreshold;
-      }
+    double[] confLongThreshold = Utils.getParam(paramMap.get(LONG_RUNTIME_SEVERITY), longRuntimeLimits.length);
+    if (confLongThreshold != null) {
+      longRuntimeLimits = confLongThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + LONG_RUNTIME_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + LONG_RUNTIME_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(longRuntimeLimits));
     for (int i = 0; i < longRuntimeLimits.length; i++) {
       longRuntimeLimits[i] = longRuntimeLimits[i] * Statistics.MINUTE_IN_MS;
     }
 
-    if(paramMap.get(NUM_TASKS_SEVERITY) != null) {
-      double[] confNumTasksThreshold = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
-      if (confNumTasksThreshold != null) {
-        numTasksLimits = confNumTasksThreshold;
-      }
+    double[] confNumTasksThreshold = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
+    if (confNumTasksThreshold != null) {
+      numTasksLimits = confNumTasksThreshold;
     }
-    logger.info(HEURISTIC_NAME + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: "
-        + Arrays.toString(numTasksLimits));
+    logger.info(heuristicName + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: " + Arrays
+        .toString(numTasksLimits));
   }
 
   public MapperTimeHeuristic(HeuristicConfigurationData heuristicConfData) {
@@ -94,8 +88,8 @@ public MapperTimeHeuristic(HeuristicConfigurationData heuristicConfData) {
   }
 
   @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
   }
 
   @Override
@@ -134,13 +128,14 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     Severity longTaskSeverity = longTaskSeverity(tasks.length, averageTimeMs);
     Severity severity = Severity.max(shortTaskSeverity, longTaskSeverity);
 
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Average task input size", FileUtils.byteCountToDisplaySize(averageSize));
-    result.addDetail("Average task runtime", Statistics.readableTimespan(averageTimeMs));
-    result.addDetail("Max task runtime", Statistics.readableTimespan(taskMaxMs));
-    result.addDetail("Min task runtime", Statistics.readableTimespan(taskMinMs));
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Average task input size", FileUtils.byteCountToDisplaySize(averageSize));
+    result.addResultDetail("Average task runtime", Statistics.readableTimespan(averageTimeMs));
+    result.addResultDetail("Max task runtime", Statistics.readableTimespan(taskMaxMs));
+    result.addResultDetail("Min task runtime", Statistics.readableTimespan(taskMinMs));
 
     return result;
   }
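Reviewer note: the explicit paramMap.get(...) null checks can be dropped here because
Utils.getParam (see the Utils.java hunk below) now returns null when the raw limits string is
null, empty, or has the wrong number of levels, so the compiled-in defaults survive. A sketch
of that contract, assuming the behavior shown later in this diff:

    double[] numTasksLimits = {10, 50, 100, 200};                 // defaults
    double[] conf = Utils.getParam(null, numTasksLimits.length);  // -> null, defaults are kept
    if (conf != null) {
      numTasksLimits = conf;
    }
    conf = Utils.getParam("10,50,200,500", 4);                    // -> {10.0, 50.0, 200.0, 500.0}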
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerDataSkewHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerDataSkewHeuristic.java
index d030d265e..19d21bd70 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerDataSkewHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerDataSkewHeuristic.java
@@ -26,10 +26,9 @@
  * This Heuristic analyses the skewness in the reducer input data
  */
 public class ReducerDataSkewHeuristic extends GenericDataSkewHeuristic {
-  public static final String HEURISTIC_NAME = "Reducer Data Skew";
 
   public ReducerDataSkewHeuristic(HeuristicConfigurationData heuristicConfData) {
-    super(MapReduceCounterData.CounterName.REDUCE_SHUFFLE_BYTES, HEURISTIC_NAME, heuristicConfData);
+    super(MapReduceCounterData.CounterName.REDUCE_SHUFFLE_BYTES, heuristicConfData);
   }
 
   @Override
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerGCHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerGCHeuristic.java
index a8fbeeb84..a4891760c 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerGCHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerGCHeuristic.java
@@ -22,15 +22,9 @@
 
 
 public class ReducerGCHeuristic extends GenericGCHeuristic {
-  public static final String HEURISTIC_NAME = "Reducer GC";
 
   public ReducerGCHeuristic(HeuristicConfigurationData _heuristicConfData) {
-    super(HEURISTIC_NAME, _heuristicConfData);
-  }
-
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+    super(_heuristicConfData);
   }
 
   @Override
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerMemoryHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerMemoryHeuristic.java
index 7a1e805d0..1976e4637 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerMemoryHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerMemoryHeuristic.java
@@ -22,16 +22,10 @@
 
 
 public class ReducerMemoryHeuristic extends GenericMemoryHeuristic {
-  public static final String HEURISTIC_NAME = "Reducer Memory";
   public static final String REDUCER_MEMORY_CONF = "mapreduce.reduce.memory.mb";
 
   public ReducerMemoryHeuristic(HeuristicConfigurationData _heuristicConfData) {
-    super(REDUCER_MEMORY_CONF, HEURISTIC_NAME, _heuristicConfData);
-  }
-
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+    super(REDUCER_MEMORY_CONF, _heuristicConfData);
   }
 
   @Override
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerTimeHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerTimeHeuristic.java
index e026ff459..e55ed8aef 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerTimeHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/ReducerTimeHeuristic.java
@@ -34,7 +34,6 @@
 
 public class ReducerTimeHeuristic implements Heuristic<MapReduceApplicationData> {
   private static final Logger logger = Logger.getLogger(ReducerTimeHeuristic.class);
-  public static final String HEURISTIC_NAME = "Reducer Time";
 
   // Severity parameters.
   private static final String SHORT_RUNTIME_SEVERITY = "short_runtime_severity_in_min";
@@ -50,39 +49,34 @@ public class ReducerTimeHeuristic implements Heuristic<MapReduceApplicationData>
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(SHORT_RUNTIME_SEVERITY) != null) {
-      double[] confShortRuntimeLimits = Utils.getParam(paramMap.get(SHORT_RUNTIME_SEVERITY), shortRuntimeLimits.length);
-      if (confShortRuntimeLimits != null) {
-        shortRuntimeLimits = confShortRuntimeLimits;
-      }
+    double[] confShortRuntimeLimits = Utils.getParam(paramMap.get(SHORT_RUNTIME_SEVERITY), shortRuntimeLimits.length);
+    if (confShortRuntimeLimits != null) {
+      shortRuntimeLimits = confShortRuntimeLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + SHORT_RUNTIME_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + SHORT_RUNTIME_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(shortRuntimeLimits));
     for (int i = 0; i < shortRuntimeLimits.length; i++) {
       shortRuntimeLimits[i] = shortRuntimeLimits[i] * Statistics.MINUTE_IN_MS;
     }
 
-    if(paramMap.get(LONG_RUNTIME_SEVERITY) != null) {
-      double[] confLongRuntimeLimitss = Utils.getParam(paramMap.get(LONG_RUNTIME_SEVERITY), longRuntimeLimits.length);
-      if (confLongRuntimeLimitss != null) {
-        longRuntimeLimits = confLongRuntimeLimitss;
-      }
+    double[] confLongRuntimeLimits = Utils.getParam(paramMap.get(LONG_RUNTIME_SEVERITY), longRuntimeLimits.length);
+    if (confLongRuntimeLimits != null) {
+      longRuntimeLimits = confLongRuntimeLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + LONG_RUNTIME_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + LONG_RUNTIME_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(longRuntimeLimits));
     for (int i = 0; i < longRuntimeLimits.length; i++) {
       longRuntimeLimits[i] = longRuntimeLimits[i] * Statistics.MINUTE_IN_MS;
     }
 
-    if(paramMap.get(NUM_TASKS_SEVERITY) != null) {
-      double[] confNumTasksLimits = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
-      if (confNumTasksLimits != null) {
-        numTasksLimits = confNumTasksLimits;
-      }
+    double[] confNumTasksLimits = Utils.getParam(paramMap.get(NUM_TASKS_SEVERITY), numTasksLimits.length);
+    if (confNumTasksLimits != null) {
+      numTasksLimits = confNumTasksLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: "
-        + Arrays.toString(numTasksLimits));
+    logger.info(heuristicName + " will use " + NUM_TASKS_SEVERITY + " with the following threshold settings: " + Arrays
+        .toString(numTasksLimits));
 
   }
 
@@ -92,8 +86,8 @@ public ReducerTimeHeuristic(HeuristicConfigurationData heuristicConfData) {
   }
 
   @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
   }
 
   @Override
@@ -129,12 +123,13 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     Severity longTimeSeverity = longTimeSeverity(averageRuntimeMs, tasks.length);
     Severity severity = Severity.max(shortTimeSeverity, longTimeSeverity);
 
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(tasks.length));
-    result.addDetail("Average task runtime", Statistics.readableTimespan(averageRuntimeMs));
-    result.addDetail("Max task runtime", Statistics.readableTimespan(taskMaxMs));
-    result.addDetail("Min task runtime", Statistics.readableTimespan(taskMinMs));
+    result.addResultDetail("Number of tasks", Integer.toString(tasks.length));
+    result.addResultDetail("Average task runtime", Statistics.readableTimespan(averageRuntimeMs));
+    result.addResultDetail("Max task runtime", Statistics.readableTimespan(taskMaxMs));
+    result.addResultDetail("Min task runtime", Statistics.readableTimespan(taskMinMs));
     return result;
   }
 
diff --git a/app/com/linkedin/drelephant/mapreduce/heuristics/ShuffleSortHeuristic.java b/app/com/linkedin/drelephant/mapreduce/heuristics/ShuffleSortHeuristic.java
index 2d70a8239..fdc4337c6 100644
--- a/app/com/linkedin/drelephant/mapreduce/heuristics/ShuffleSortHeuristic.java
+++ b/app/com/linkedin/drelephant/mapreduce/heuristics/ShuffleSortHeuristic.java
@@ -37,7 +37,6 @@
  */
 public class ShuffleSortHeuristic implements Heuristic<MapReduceApplicationData> {
   private static final Logger logger = Logger.getLogger(ShuffleSortHeuristic.class);
-  public static final String HEURISTIC_NAME = "Shuffle & Sort";
 
   // Severity parameters.
   private static final String RUNTIME_RATIO_SEVERITY = "runtime_ratio_severity";
@@ -51,24 +50,21 @@ public class ShuffleSortHeuristic implements Heuristic<MapReduceApplicationData>
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(RUNTIME_RATIO_SEVERITY) != null) {
-      double[] confRatioLimitsd = Utils.getParam(paramMap.get(RUNTIME_RATIO_SEVERITY), runtimeRatioLimits.length);
-      if (confRatioLimitsd != null) {
-        runtimeRatioLimits = confRatioLimitsd;
-      }
+    double[] confRatioLimits = Utils.getParam(paramMap.get(RUNTIME_RATIO_SEVERITY), runtimeRatioLimits.length);
+    if (confRatioLimits != null) {
+      runtimeRatioLimits = confRatioLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + RUNTIME_RATIO_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + RUNTIME_RATIO_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(runtimeRatioLimits));
 
-    if(paramMap.get(RUNTIME_SEVERITY) != null) {
-      double[] confRuntimeLimits = Utils.getParam(paramMap.get(RUNTIME_SEVERITY), runtimeLimits.length);
-      if (confRuntimeLimits != null) {
-        runtimeLimits = confRuntimeLimits;
-      }
+    double[] confRuntimeLimits = Utils.getParam(paramMap.get(RUNTIME_SEVERITY), runtimeLimits.length);
+    if (confRuntimeLimits != null) {
+      runtimeLimits = confRuntimeLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + RUNTIME_SEVERITY + " with the following threshold settings: "
-        + Arrays.toString(runtimeLimits));
+    logger.info(heuristicName + " will use " + RUNTIME_SEVERITY + " with the following threshold settings: " + Arrays
+        .toString(runtimeLimits));
     for (int i = 0; i < runtimeLimits.length; i++) {
       runtimeLimits[i] = runtimeLimits[i] * Statistics.MINUTE_IN_MS;
     }
@@ -80,8 +76,8 @@ public ShuffleSortHeuristic(HeuristicConfigurationData heuristicConfData) {
   }
 
   @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
   }
 
   @Override
@@ -114,14 +110,15 @@ public HeuristicResult apply(MapReduceApplicationData data) {
     Severity sortSeverity = getShuffleSortSeverity(avgSortTimeMs, avgExecTimeMs);
     Severity severity = Severity.max(shuffleSeverity, sortSeverity);
 
-    HeuristicResult result = new HeuristicResult(HEURISTIC_NAME, severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, Utils.getHeuristicScore(severity, tasks.length));
 
-    result.addDetail("Number of tasks", Integer.toString(data.getReducerData().length));
-    result.addDetail("Average code runtime", Statistics.readableTimespan(avgExecTimeMs));
+    result.addResultDetail("Number of tasks", Integer.toString(data.getReducerData().length));
+    result.addResultDetail("Average code runtime", Statistics.readableTimespan(avgExecTimeMs));
     String shuffleFactor = Statistics.describeFactor(avgShuffleTimeMs, avgExecTimeMs, "x");
-    result.addDetail("Average shuffle time", Statistics.readableTimespan(avgShuffleTimeMs) + " " + shuffleFactor);
+    result.addResultDetail("Average shuffle time", Statistics.readableTimespan(avgShuffleTimeMs) + " " + shuffleFactor);
     String sortFactor = Statistics.describeFactor(avgSortTimeMs, avgExecTimeMs, "x");
-    result.addDetail("Average sort time", Statistics.readableTimespan(avgSortTimeMs) + " " + sortFactor);
+    result.addResultDetail("Average sort time", Statistics.readableTimespan(avgSortTimeMs) + " " + sortFactor);
 
     return result;
   }
diff --git a/app/com/linkedin/drelephant/math/Statistics.java b/app/com/linkedin/drelephant/math/Statistics.java
index bb6a2a515..aab1e1783 100644
--- a/app/com/linkedin/drelephant/math/Statistics.java
+++ b/app/com/linkedin/drelephant/math/Statistics.java
@@ -111,6 +111,12 @@ private static long[] toArray(List<Long> input) {
     return result;
   }
 
+  /**
+   * Compute the average of the given array of longs
+   *
+   * @param values The values to average
+   * @return The average of the values
+   */
   public static long average(long[] values) {
     //Find average
     double sum = 0d;
@@ -120,6 +126,12 @@ public static long average(long[] values) {
     return (long) (sum / (double) values.length);
   }
 
+  /**
+   * Compute the average of a List of long values
+   *
+   * @param values The values to average
+   * @return The average of the values
+   */
   public static long average(List<Long> values) {
     //Find average
     double sum = 0d;
@@ -129,6 +141,12 @@ public static long average(List<Long> values) {
     return (long) (sum / (double) values.size());
   }
 
+  /**
+   * Find the median of the given list
+   *
+   * @param values The values
+   * @return The median of the values
+   */
   public static long median(List<Long> values) {
     if (values.size() == 0) {
       throw new IllegalArgumentException("Median of an empty list is not defined.");
@@ -142,6 +160,16 @@ public static long median(List<Long> values) {
     }
   }
 
+  /**
+   * Compute a ratio and display it with a suffix.
+   *
+   * Example: Average sort time (0.14x)
+   *
+   * @param value The value to be compared
+   * @param compare The value it is compared against
+   * @param suffix The suffix string
+   * @return The ratio followed by the suffix, wrapped in parentheses
+   */
   public static String describeFactor(long value, long compare, String suffix) {
     double factor = (double) value / (double) compare;
     if (Double.isNaN(factor)) {
@@ -150,6 +178,12 @@ public static String describeFactor(long value, long compare, String suffix) {
     return "(" + String.format("%.2f", factor) + suffix + ")";
   }
 
+  /**
+   * Convert milliseconds to a human-readable timespan
+   *
+   * @param milliseconds The number of milliseconds
+   * @return A human-readable time string, e.g. "1 hr 2 min 5 sec"
+   */
   public static String readableTimespan(long milliseconds) {
     if (milliseconds == 0) {
       return "0 sec";
@@ -162,13 +196,13 @@ public static String readableTimespan(long milliseconds) {
     seconds %= 60;
     StringBuilder sb = new StringBuilder();
     if (hours > 0) {
-      sb.append(hours).append("hr ");
+      sb.append(hours).append(" hr ");
     }
     if (minutes > 0) {
-      sb.append(minutes).append("min ");
+      sb.append(minutes).append(" min ");
     }
     if (seconds > 0) {
-      sb.append(seconds).append("sec ");
+      sb.append(seconds).append(" sec");
     }
     return sb.toString().trim();
   }
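Reviewer note: with the separator fix above, each unit keeps a trailing space so adjacent
units do not run together, and the final trim() removes the last one. Expected outputs,
assuming the code as patched:

    Statistics.readableTimespan(0);           // "0 sec"
    Statistics.readableTimespan(3725000);     // "1 hr 2 min 5 sec"
    Statistics.describeFactor(14, 100, "x");  // "(0.14x)"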
diff --git a/app/com/linkedin/drelephant/notifications/EmailThread.java b/app/com/linkedin/drelephant/notifications/EmailThread.java
index 4e9541f1d..2833457c5 100644
--- a/app/com/linkedin/drelephant/notifications/EmailThread.java
+++ b/app/com/linkedin/drelephant/notifications/EmailThread.java
@@ -17,7 +17,7 @@
 package com.linkedin.drelephant.notifications;
 
 import com.linkedin.drelephant.analysis.Severity;
-import model.JobResult;
+import models.AppResult;
 import org.apache.commons.mail.DefaultAuthenticator;
 import org.apache.commons.mail.EmailException;
 import org.apache.commons.mail.HtmlEmail;
@@ -30,7 +30,7 @@
 
 public class EmailThread extends Thread {
 
-  private LinkedBlockingQueue<JobResult> _resultQueue;
+  private LinkedBlockingQueue<AppResult> _resultQueue;
   private AtomicBoolean _running = new AtomicBoolean(true);
 
   private String _smtpHost;
@@ -40,7 +40,7 @@ public class EmailThread extends Thread {
 
   public EmailThread() {
     setName("Email Thread");
-    _resultQueue = new LinkedBlockingQueue<JobResult>();
+    _resultQueue = new LinkedBlockingQueue<AppResult>();
     _smtpHost = Play.application().configuration().getString("smtp.host");
     _smtpPort = Play.application().configuration().getInt("smtp.port");
     String smtpUser = Play.application().configuration().getString("smtp.user");
@@ -54,7 +54,7 @@ public EmailThread() {
   @Override
   public void run() {
     while (_running.get()) {
-      JobResult result = null;
+      AppResult result = null;
       while (result == null && _running.get()) {
         try {
           result = _resultQueue.take();
@@ -80,7 +80,7 @@ public void kill() {
     this.interrupt();
   }
 
-  public void enqueue(JobResult result) {
+  public void enqueue(AppResult result) {
     try {
       _resultQueue.put(result);
     } catch (InterruptedException e) {
@@ -88,7 +88,7 @@ public void enqueue(JobResult result) {
     }
   }
 
-  private void sendCriticalEmail(JobResult result) {
+  private void sendCriticalEmail(AppResult result) {
     try {
       //Generate content
       String html = emailcritical.render(result).body();
diff --git a/app/com/linkedin/drelephant/spark/heuristics/BestPropertiesConventionHeuristic.java b/app/com/linkedin/drelephant/spark/heuristics/BestPropertiesConventionHeuristic.java
index 252c5d969..46c0609c2 100644
--- a/app/com/linkedin/drelephant/spark/heuristics/BestPropertiesConventionHeuristic.java
+++ b/app/com/linkedin/drelephant/spark/heuristics/BestPropertiesConventionHeuristic.java
@@ -36,7 +36,6 @@
 public class BestPropertiesConventionHeuristic implements Heuristic<SparkApplicationData> {
   private static final Logger logger = Logger.getLogger(BestPropertiesConventionHeuristic.class);
 
-  public static final String HEURISTIC_NAME = "Spark Configuration Best Practice";
   public static final String SPARK_SERIALIZER = "spark.serializer";
   public static final String SPARK_DRIVER_MEMORY = "spark.driver.memory";
   public static final String SPARK_SHUFFLE_MANAGER = "spark.shuffle.manager";
@@ -54,23 +53,20 @@ public class BestPropertiesConventionHeuristic implements Heuristic<SparkApplica
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(NUM_CORE_SEVERITY) != null) {
-      double[] confNumCoreLimit = Utils.getParam(paramMap.get(NUM_CORE_SEVERITY), numCoreLimit.length);
-      if (confNumCoreLimit != null) {
-        numCoreLimit = confNumCoreLimit;
-      }
+    double[] confNumCoreLimit = Utils.getParam(paramMap.get(NUM_CORE_SEVERITY), numCoreLimit.length);
+    if (confNumCoreLimit != null) {
+      numCoreLimit = confNumCoreLimit;
     }
-    logger.info(HEURISTIC_NAME + " will use " + NUM_CORE_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + NUM_CORE_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(numCoreLimit));
 
-    if(paramMap.get(DRIVER_MEM_SEVERITY) != null) {
-      double[] confDriverMemLimits = Utils.getParam(paramMap.get(DRIVER_MEM_SEVERITY), driverMemLimits.length);
-      if (confDriverMemLimits != null) {
-        driverMemLimits = confDriverMemLimits;
-      }
+    double[] confDriverMemLimits = Utils.getParam(paramMap.get(DRIVER_MEM_SEVERITY), driverMemLimits.length);
+    if (confDriverMemLimits != null) {
+      driverMemLimits = confDriverMemLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + DRIVER_MEM_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + DRIVER_MEM_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(driverMemLimits));
     for (int i = 0; i < driverMemLimits.length; i++) {
       driverMemLimits[i] = (double) MemoryFormatUtils.stringToBytes(Double.toString(driverMemLimits[i]) + "G");
@@ -82,6 +78,11 @@ public BestPropertiesConventionHeuristic(HeuristicConfigurationData heuristicCon
     loadParameters();
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(SparkApplicationData data) {
     SparkEnvironmentData env = data.getEnvironmentData();
@@ -97,13 +98,14 @@ public HeuristicResult apply(SparkApplicationData data) {
     Severity sortSeverity = binarySeverity("sort", sparkShuffleManager, true, Severity.MODERATE);
     Severity executorCoreSeverity = getCoreNumSeverity(coreNum);
 
-    HeuristicResult result = new HeuristicResult(getHeuristicName(),
-        Severity.max(kryoSeverity, driverMemSeverity, sortSeverity, executorCoreSeverity));
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), Severity.max(kryoSeverity, driverMemSeverity, sortSeverity,
+        executorCoreSeverity), 0);
 
-    result.addDetail(SPARK_SERIALIZER, propertyToString(sparkSerializer));
-    result.addDetail(SPARK_DRIVER_MEMORY, propertyToString(sparkDriverMemory));
-    result.addDetail(SPARK_SHUFFLE_MANAGER, propertyToString(sparkShuffleManager));
-    result.addDetail(SPARK_EXECUTOR_CORES, propertyToString(sparkExecutorCores));
+    result.addResultDetail(SPARK_SERIALIZER, propertyToString(sparkSerializer));
+    result.addResultDetail(SPARK_DRIVER_MEMORY, propertyToString(sparkDriverMemory));
+    result.addResultDetail(SPARK_SHUFFLE_MANAGER, propertyToString(sparkShuffleManager));
+    result.addResultDetail(SPARK_EXECUTOR_CORES, propertyToString(sparkExecutorCores));
 
     return result;
   }
@@ -139,11 +141,6 @@ private static Severity binarySeverity(String expectedValue, String actualValue,
   }
 
   private static String propertyToString(String val) {
-    return val == null ? "not presented, using default" : val;
-  }
-
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
+    return val == null ? "Not presented. Using default" : val;
   }
 }
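Reviewer note: in the loadParameters hunk above, the driver-memory thresholds are configured
in GB and converted to bytes before comparison. A sketch of the conversion, assuming
MemoryFormatUtils parses values like "2.0G" with 1024-based units:

    double limitGb = 2.0;
    long limitBytes = MemoryFormatUtils.stringToBytes(Double.toString(limitGb) + "G");
    // limitBytes == 2L * 1024 * 1024 * 1024 under that assumption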
diff --git a/app/com/linkedin/drelephant/spark/heuristics/EventLogLimitHeuristic.java b/app/com/linkedin/drelephant/spark/heuristics/EventLogLimitHeuristic.java
index aeff74578..201b756c2 100644
--- a/app/com/linkedin/drelephant/spark/heuristics/EventLogLimitHeuristic.java
+++ b/app/com/linkedin/drelephant/spark/heuristics/EventLogLimitHeuristic.java
@@ -28,29 +28,29 @@
  * approve it.
  */
 public class EventLogLimitHeuristic implements Heuristic<SparkApplicationData> {
-  public static final String HEURISTIC_NAME = "Spark Event Log Limit";
   private HeuristicConfigurationData _heuristicConfData;
 
   public EventLogLimitHeuristic(HeuristicConfigurationData heuristicConfData) {
     this._heuristicConfData = heuristicConfData;
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(SparkApplicationData data) {
     Severity severity = getSeverity(data);
-    HeuristicResult result = new HeuristicResult(getHeuristicName(), severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, 0);
     if (severity == Severity.CRITICAL) {
-      result.addDetail("Spark job's event log passes the limit. No actual log data is fetched."
-          + " All other heuristic rules will not make sense.");
+      result.addResultDetail("Large Log File", "Spark job's event log passes the limit. No actual log data is fetched."
+          + " All other heuristic rules will not make sense.", null);
     }
     return result;
   }
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
-
   private Severity getSeverity(SparkApplicationData data) {
     if (data.isThrottled()) {
       return Severity.CRITICAL;
diff --git a/app/com/linkedin/drelephant/spark/heuristics/ExecutorLoadHeuristic.java b/app/com/linkedin/drelephant/spark/heuristics/ExecutorLoadHeuristic.java
index 9fd778192..086d5fe33 100644
--- a/app/com/linkedin/drelephant/spark/heuristics/ExecutorLoadHeuristic.java
+++ b/app/com/linkedin/drelephant/spark/heuristics/ExecutorLoadHeuristic.java
@@ -38,7 +38,6 @@
  */
 public class ExecutorLoadHeuristic implements Heuristic<SparkApplicationData> {
   private static final Logger logger = Logger.getLogger(ExecutorLoadHeuristic.class);
-  public static final String HEURISTIC_NAME = "Spark Executor Load Balance";
   private static final long MEMORY_OBSERVATION_THRESHOLD = MemoryFormatUtils.stringToBytes("1 MB");
 
   // Severity parameters.
@@ -53,24 +52,21 @@ public class ExecutorLoadHeuristic implements Heuristic<SparkApplicationData> {
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(LOOSER_METRIC_DEV_SEVERITY) != null) {
-      double[] confLooserMetDevLimits = Utils.getParam(paramMap.get(LOOSER_METRIC_DEV_SEVERITY),
-          looserMetDevLimits.length);
-      if (confLooserMetDevLimits != null) {
-        looserMetDevLimits = confLooserMetDevLimits;
-      }
+    double[] confLooserMetDevLimits = Utils.getParam(paramMap.get(LOOSER_METRIC_DEV_SEVERITY),
+        looserMetDevLimits.length);
+    if (confLooserMetDevLimits != null) {
+      looserMetDevLimits = confLooserMetDevLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + LOOSER_METRIC_DEV_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + LOOSER_METRIC_DEV_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(looserMetDevLimits));
 
-    if(paramMap.get(METRIC_DEV_SEVERITY) != null) {
-      double[] confMetDevLimits = Utils.getParam(paramMap.get(METRIC_DEV_SEVERITY), metDevLimits.length);
-      if (confMetDevLimits != null) {
-        metDevLimits = confMetDevLimits;
-      }
+    double[] confMetDevLimits = Utils.getParam(paramMap.get(METRIC_DEV_SEVERITY), metDevLimits.length);
+    if (confMetDevLimits != null) {
+      metDevLimits = confMetDevLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + METRIC_DEV_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + METRIC_DEV_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(metDevLimits));
   }
 
@@ -137,6 +133,11 @@ public double getDeviationFactor() {
     }
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(SparkApplicationData data) {
     SparkExecutorData executorData = data.getExecutorData();
@@ -175,22 +176,21 @@ public HeuristicResult apply(SparkApplicationData data) {
     Severity severity = Severity.max(getLooserMetricDeviationSeverity(peakMems), getMetricDeviationSeverity(durations),
         getMetricDeviationSeverity(inputBytes), getLooserMetricDeviationSeverity(outputBytes));
 
-    HeuristicResult result = new HeuristicResult(getHeuristicName(), severity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), severity, 0);
 
-    result.addDetail("Average peak storage memory", String
-        .format("%s (%s~%s)", MemoryFormatUtils.bytesToString(peakMems.getAvg()),
+    result.addResultDetail("Average peak storage memory",
+        String.format("%s (%s~%s)", MemoryFormatUtils.bytesToString(peakMems.getAvg()),
             MemoryFormatUtils.bytesToString(peakMems.getMin()), MemoryFormatUtils.bytesToString(peakMems.getMax())));
-    result.addDetail("Average runtime", String
-        .format("%s (%s~%s)", Statistics.readableTimespan(durations.getAvg()),
+    result.addResultDetail("Average runtime",
+        String.format("%s (%s~%s)", Statistics.readableTimespan(durations.getAvg()),
             Statistics.readableTimespan(durations.getMin()), Statistics.readableTimespan(durations.getMax())));
-    result.addDetail("Average input size", String
-        .format("%s (%s~%s)", MemoryFormatUtils.bytesToString(inputBytes.getAvg()),
-            MemoryFormatUtils.bytesToString(inputBytes.getMin()),
-            MemoryFormatUtils.bytesToString(inputBytes.getMax())));
-    result.addDetail("Average output size", String
-        .format("%s (%s~%s)", MemoryFormatUtils.bytesToString(outputBytes.getAvg()),
-            MemoryFormatUtils.bytesToString(outputBytes.getMin()),
-            MemoryFormatUtils.bytesToString(outputBytes.getMax())));
+    result.addResultDetail("Average input size",
+        String.format("%s (%s~%s)", MemoryFormatUtils.bytesToString(inputBytes.getAvg()),
+            MemoryFormatUtils.bytesToString(inputBytes.getMin()), MemoryFormatUtils.bytesToString(inputBytes.getMax())));
+    result.addResultDetail("Average output size",
+        String.format("%s (%s~%s)", MemoryFormatUtils.bytesToString(outputBytes.getAvg()),
+            MemoryFormatUtils.bytesToString(outputBytes.getMin()), MemoryFormatUtils.bytesToString(outputBytes.getMax())));
 
     return result;
   }
@@ -214,8 +214,4 @@ private Severity getMetricDeviationSeverity(ValueObserver ob) {
         diffFactor, metDevLimits[0], metDevLimits[1], metDevLimits[2], metDevLimits[3]);
   }
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
 }
diff --git a/app/com/linkedin/drelephant/spark/heuristics/JobRuntimeHeuristic.java b/app/com/linkedin/drelephant/spark/heuristics/JobRuntimeHeuristic.java
index 3b3520d0e..e88283dd7 100644
--- a/app/com/linkedin/drelephant/spark/heuristics/JobRuntimeHeuristic.java
+++ b/app/com/linkedin/drelephant/spark/heuristics/JobRuntimeHeuristic.java
@@ -38,7 +38,6 @@
  */
 public class JobRuntimeHeuristic implements Heuristic<SparkApplicationData> {
   private static final Logger logger = Logger.getLogger(JobRuntimeHeuristic.class);
-  public static final String HEURISTIC_NAME = "Spark Job Runtime";
 
   // Severity parameters.
   private static final String AVG_JOB_FAILURE_SEVERITY = "avg_job_failure_rate_severity";
@@ -52,25 +51,22 @@ public class JobRuntimeHeuristic implements Heuristic<SparkApplicationData> {
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(AVG_JOB_FAILURE_SEVERITY) != null) {
-      double[] confAvgJobFailureLimits = Utils.getParam(paramMap.get(AVG_JOB_FAILURE_SEVERITY),
-          avgJobFailureLimits.length);
-      if (confAvgJobFailureLimits != null) {
-        avgJobFailureLimits = confAvgJobFailureLimits;
-      }
+    double[] confAvgJobFailureLimits = Utils.getParam(paramMap.get(AVG_JOB_FAILURE_SEVERITY),
+        avgJobFailureLimits.length);
+    if (confAvgJobFailureLimits != null) {
+      avgJobFailureLimits = confAvgJobFailureLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + AVG_JOB_FAILURE_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + AVG_JOB_FAILURE_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(avgJobFailureLimits));
 
-    if(paramMap.get(SINGLE_JOB_FAILURE_SEVERITY) != null) {
-      double[] confJobFailureLimits = Utils.getParam(paramMap.get(SINGLE_JOB_FAILURE_SEVERITY),
-          jobFailureLimits.length);
-      if (confJobFailureLimits != null) {
-        jobFailureLimits = confJobFailureLimits;
-      }
+    double[] confJobFailureLimits = Utils.getParam(paramMap.get(SINGLE_JOB_FAILURE_SEVERITY),
+        jobFailureLimits.length);
+    if (confJobFailureLimits != null) {
+      jobFailureLimits = confJobFailureLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + SINGLE_JOB_FAILURE_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + SINGLE_JOB_FAILURE_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(jobFailureLimits));
   }
 
@@ -79,6 +75,11 @@ public JobRuntimeHeuristic(HeuristicConfigurationData heuristicConfData) {
     loadParameters();
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(SparkApplicationData data) {
     SparkJobProgressData jobProgressData = data.getJobProgressData();
@@ -106,22 +107,18 @@ public HeuristicResult apply(SparkApplicationData data) {
       endSeverity = Severity.max(endSeverity, severity);
     }
 
-    HeuristicResult result = new HeuristicResult(getHeuristicName(), endSeverity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), endSeverity, 0);
 
-    result.addDetail("Spark completed jobs number", String.valueOf(completedJobs.size()));
-    result.addDetail("Spark failed jobs number", String.valueOf(failedJobs.size()));
-    result.addDetail("Spark failed jobs list", getJobListString(jobProgressData.getFailedJobDescriptions()));
-    result.addDetail("Spark average job failure rate", String.format("%.3f", avgJobFailureRate));
-    result.addDetail("Spark jobs with high task failure rate", getJobListString(highFailureRateJobs));
+    result.addResultDetail("Spark completed jobs number", String.valueOf(completedJobs.size()));
+    result.addResultDetail("Spark failed jobs number", String.valueOf(failedJobs.size()));
+    result.addResultDetail("Spark failed jobs list", getJobListString(jobProgressData.getFailedJobDescriptions()));
+    result.addResultDetail("Spark average job failure rate", String.format("%.3f", avgJobFailureRate));
+    result.addResultDetail("Spark jobs with high task failure rate", getJobListString(highFailureRateJobs));
 
     return result;
   }
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
-
   private Severity getAvgJobFailureRateSeverity(double rate) {
     return Severity.getSeverityAscending(
         rate, avgJobFailureLimits[0], avgJobFailureLimits[1], avgJobFailureLimits[2], avgJobFailureLimits[3]);
diff --git a/app/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristic.java b/app/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristic.java
index 08690768a..6224b787a 100644
--- a/app/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristic.java
+++ b/app/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristic.java
@@ -37,7 +37,6 @@
  * This heuristic checks for memory consumption.
  */
 public class MemoryLimitHeuristic implements Heuristic<SparkApplicationData> {
-  public static final String HEURISTIC_NAME = "Spark Memory Limit";
   private static final Logger logger = Logger.getLogger(MemoryLimitHeuristic.class);
 
   public static final String SPARK_EXECUTOR_MEMORY = "spark.executor.memory";
@@ -59,6 +58,7 @@ public class MemoryLimitHeuristic implements Heuristic<SparkApplicationData> {
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
     if(paramMap.get(MEM_UTILIZATION_SEVERITY) != null) {
       double[] confMemUtilLimits = Utils.getParam(paramMap.get(MEM_UTILIZATION_SEVERITY), memUtilLimits.length);
@@ -66,7 +66,7 @@ private void loadParameters() {
         memUtilLimits = confMemUtilLimits;
       }
     }
-    logger.info(HEURISTIC_NAME + " will use " + MEM_UTILIZATION_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + MEM_UTILIZATION_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(memUtilLimits));
 
     if(paramMap.get(TOTAL_MEM_SEVERITY) != null) {
@@ -75,7 +75,7 @@ private void loadParameters() {
         totalMemLimits = confTotalMemLimits;
       }
     }
-    logger.info(HEURISTIC_NAME + " will use " + TOTAL_MEM_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + TOTAL_MEM_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(totalMemLimits));
     for (int i = 0; i < totalMemLimits.length; i++) {
       totalMemLimits[i] = MemoryFormatUtils.stringToBytes(totalMemLimits[i] + "T");
@@ -87,6 +87,11 @@ public MemoryLimitHeuristic(HeuristicConfigurationData heuristicConfData) {
     loadParameters();
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(SparkApplicationData data) {
 
@@ -104,15 +109,16 @@ public HeuristicResult apply(SparkApplicationData data) {
     Severity memoryUtilizationSeverity = getMemoryUtilizationSeverity(peakMem, totalStorageMem);
 
     HeuristicResult result =
-        new HeuristicResult(getHeuristicName(), Severity.max(totalMemorySeverity, memoryUtilizationSeverity));
+        new HeuristicResult(_heuristicConfData.getClassName(), _heuristicConfData.getHeuristicName(),
+            Severity.max(totalMemorySeverity, memoryUtilizationSeverity), 0);
 
-    result.addDetail("Total executor memory allocated", String
+    result.addResultDetail("Total executor memory allocated", String
         .format("%s (%s x %s)", MemoryFormatUtils.bytesToString(totalExecutorMem),
             MemoryFormatUtils.bytesToString(perExecutorMem), executorNum));
-    result.addDetail("Total driver memory allocated", MemoryFormatUtils.bytesToString(totalDriverMem));
-    result.addDetail("Total memory allocated for storage", MemoryFormatUtils.bytesToString(totalStorageMem));
-    result.addDetail("Total memory used at peak", MemoryFormatUtils.bytesToString(peakMem));
-    result.addDetail("Memory utilization rate", String.format("%1.3f", peakMem * 1.0 / totalStorageMem));
+    result.addResultDetail("Total driver memory allocated", MemoryFormatUtils.bytesToString(totalDriverMem));
+    result.addResultDetail("Total memory allocated for storage", MemoryFormatUtils.bytesToString(totalStorageMem));
+    result.addResultDetail("Total memory used at peak", MemoryFormatUtils.bytesToString(peakMem));
+    result.addResultDetail("Memory utilization rate", String.format("%1.3f", peakMem * 1.0 / totalStorageMem));
     return result;
   }
 
@@ -184,11 +190,6 @@ private static long getTotalStorageMem(SparkApplicationData data) {
     return totalStorageMem;
   }
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
-
   public Severity getTotalMemorySeverity(long memory) {
     return Severity.getSeverityAscending(memory, totalMemLimits[0], totalMemLimits[1], totalMemLimits[2],
         totalMemLimits[3]);
diff --git a/app/com/linkedin/drelephant/spark/heuristics/StageRuntimeHeuristic.java b/app/com/linkedin/drelephant/spark/heuristics/StageRuntimeHeuristic.java
index 7064b946d..a7197bdd4 100644
--- a/app/com/linkedin/drelephant/spark/heuristics/StageRuntimeHeuristic.java
+++ b/app/com/linkedin/drelephant/spark/heuristics/StageRuntimeHeuristic.java
@@ -40,7 +40,6 @@
  */
 public class StageRuntimeHeuristic implements Heuristic<SparkApplicationData> {
   private static final Logger logger = Logger.getLogger(StageRuntimeHeuristic.class);
-  public static final String HEURISTIC_NAME = "Spark Stage Runtime";
 
   // Severity parameters
   private static final String STAGE_FAILURE_SEVERITY = "stage_failure_rate_severity";
@@ -56,34 +55,29 @@ public class StageRuntimeHeuristic implements Heuristic<SparkApplicationData> {
 
   private void loadParameters() {
     Map<String, String> paramMap = _heuristicConfData.getParamMap();
+    String heuristicName = _heuristicConfData.getHeuristicName();
 
-    if(paramMap.get(STAGE_FAILURE_SEVERITY) != null) {
-      double[] confStageFailRateLimits = Utils.getParam(paramMap.get(STAGE_FAILURE_SEVERITY),
-          stageFailRateLimits.length);
-      if (confStageFailRateLimits != null) {
-        stageFailRateLimits = confStageFailRateLimits;
-      }
+    double[] confStageFailRateLimits = Utils.getParam(paramMap.get(STAGE_FAILURE_SEVERITY),
+        stageFailRateLimits.length);
+    if (confStageFailRateLimits != null) {
+      stageFailRateLimits = confStageFailRateLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + STAGE_FAILURE_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + STAGE_FAILURE_SEVERITY + " with the following threshold settings: "
             + Arrays.toString(stageFailRateLimits));
 
-    if(paramMap.get(SINGLE_STAGE_FAILURE_SEVERITY) != null) {
-      double[] confSingleFailLimits = Utils.getParam(paramMap.get(SINGLE_STAGE_FAILURE_SEVERITY),
-          singleStageFailLimits.length);
-      if (confSingleFailLimits != null) {
-        singleStageFailLimits = confSingleFailLimits;
-      }
+    double[] confSingleFailLimits = Utils.getParam(paramMap.get(SINGLE_STAGE_FAILURE_SEVERITY),
+        singleStageFailLimits.length);
+    if (confSingleFailLimits != null) {
+      singleStageFailLimits = confSingleFailLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + SINGLE_STAGE_FAILURE_SEVERITY + " with the following threshold"
+    logger.info(heuristicName + " will use " + SINGLE_STAGE_FAILURE_SEVERITY + " with the following threshold"
         + " settings: " + Arrays.toString(singleStageFailLimits));
 
-    if(paramMap.get(STAGE_RUNTIME_SEVERITY) != null) {
-      double[] confStageRuntimeLimits = Utils.getParam(paramMap.get(STAGE_RUNTIME_SEVERITY), stageRuntimeLimits.length);
-      if (confStageRuntimeLimits != null) {
-        stageRuntimeLimits = confStageRuntimeLimits;
-      }
+    double[] confStageRuntimeLimits = Utils.getParam(paramMap.get(STAGE_RUNTIME_SEVERITY), stageRuntimeLimits.length);
+    if (confStageRuntimeLimits != null) {
+      stageRuntimeLimits = confStageRuntimeLimits;
     }
-    logger.info(HEURISTIC_NAME + " will use " + STAGE_RUNTIME_SEVERITY + " with the following threshold settings: "
+    logger.info(heuristicName + " will use " + STAGE_RUNTIME_SEVERITY + " with the following threshold settings: "
         + Arrays.toString(stageRuntimeLimits));
     for (int i = 0; i < stageRuntimeLimits.length; i++) {
       stageRuntimeLimits[i] = stageRuntimeLimits[i] * Statistics.MINUTE_IN_MS;
@@ -95,6 +89,11 @@ public StageRuntimeHeuristic(HeuristicConfigurationData heuristicConfData) {
     loadParameters();
   }
 
+  @Override
+  public HeuristicConfigurationData getHeuristicConfData() {
+    return _heuristicConfData;
+  }
+
   @Override
   public HeuristicResult apply(SparkApplicationData data) {
     SparkJobProgressData jobProgressData = data.getJobProgressData();
@@ -129,21 +128,17 @@ public HeuristicResult apply(SparkApplicationData data) {
       endSeverity = Severity.max(endSeverity, tasksFailureRateSeverity, runtimeSeverity);
     }
 
-    HeuristicResult result = new HeuristicResult(getHeuristicName(), endSeverity);
+    HeuristicResult result = new HeuristicResult(_heuristicConfData.getClassName(),
+        _heuristicConfData.getHeuristicName(), endSeverity, 0);
 
-    result.addDetail("Spark stage completed", String.valueOf(completedStages.size()));
-    result.addDetail("Spark stage failed", String.valueOf(failedStages.size()));
-    result.addDetail("Spark average stage failure rate", String.format("%.3f", avgStageFailureRate));
-    result.addDetail("Spark problematic stages:", getStageListString(problematicStages));
+    result.addResultDetail("Spark stage completed", String.valueOf(completedStages.size()));
+    result.addResultDetail("Spark stage failed", String.valueOf(failedStages.size()));
+    result.addResultDetail("Spark average stage failure rate", String.format("%.3f", avgStageFailureRate));
+    result.addResultDetail("Spark problematic stages:", getStageListString(problematicStages));
 
     return result;
   }
 
-  @Override
-  public String getHeuristicName() {
-    return HEURISTIC_NAME;
-  }
-
   private Severity getStageRuntimeSeverity(long runtime) {
     return Severity.getSeverityDescending(
         runtime, stageRuntimeLimits[0], stageRuntimeLimits[1], stageRuntimeLimits[2], stageRuntimeLimits[3]);
diff --git a/app/com/linkedin/drelephant/util/InfoExtractor.java b/app/com/linkedin/drelephant/util/InfoExtractor.java
index fd3e37a34..b824f7161 100644
--- a/app/com/linkedin/drelephant/util/InfoExtractor.java
+++ b/app/com/linkedin/drelephant/util/InfoExtractor.java
@@ -26,7 +26,7 @@
 import org.apache.commons.lang.StringUtils;
 import org.apache.log4j.Logger;
 
-import model.JobResult;
+import models.AppResult;
 
 import com.linkedin.drelephant.mapreduce.data.MapReduceApplicationData;
 
@@ -43,9 +43,10 @@ public class InfoExtractor {
   private static final String AZKABAN_JOB_URL = "azkaban.link.job.url";
   private static final String AZKABAN_EXECUTION_URL = "azkaban.link.execution.url";
   private static final String AZKABAN_ATTEMPT_URL = "azkaban.link.attempt.url";
+  private static final String AZKABAN_JOB_NAME = "azkaban.job.id";
 
   // TODO: this utils class is not ideal, probably should merge retrieve URLs logics directly into the data interface?
-  public static void retrieveURLs(JobResult result, HadoopApplicationData data) {
+  public static void retrieveURLs(AppResult result, HadoopApplicationData data) {
     if (data instanceof MapReduceApplicationData) {
       retrieveURLs(result, (MapReduceApplicationData) data);
     } else if (data instanceof SparkApplicationData) {
@@ -53,19 +54,42 @@ public static void retrieveURLs(JobResult result, HadoopApplicationData data) {
     }
   }
 
-  public static void retrieveURLs(JobResult result, MapReduceApplicationData appData) {
+  public static void retrieveURLs(AppResult result, MapReduceApplicationData appData) {
     Properties jobConf = appData.getConf();
     String jobId = appData.getJobId();
-    result.jobExecUrl = truncate(jobConf.getProperty(AZKABAN_ATTEMPT_URL), jobId);
-    // For jobs launched by Azkaban, we consider different attempts to be
-    // different jobs
-    result.jobUrl = truncate(jobConf.getProperty(AZKABAN_JOB_URL), jobId);
-    result.flowExecUrl = truncate(jobConf.getProperty(AZKABAN_EXECUTION_URL), jobId);
-    result.flowUrl = truncate(jobConf.getProperty(AZKABAN_WORKFLOW_URL), jobId);
+
+    result.jobExecId = jobConf.getProperty(AZKABAN_ATTEMPT_URL) != null ?
+        truncate(jobConf.getProperty(AZKABAN_ATTEMPT_URL), jobId) : "";
+    // For jobs launched by Azkaban, we consider different attempts to be different jobs
+    result.jobDefId = jobConf.getProperty(AZKABAN_JOB_URL) != null ?
+        truncate(jobConf.getProperty(AZKABAN_JOB_URL), jobId) : "";
+    result.flowExecId = jobConf.getProperty(AZKABAN_EXECUTION_URL) != null ?
+        truncate(jobConf.getProperty(AZKABAN_EXECUTION_URL), jobId) : "";
+    result.flowDefId = jobConf.getProperty(AZKABAN_WORKFLOW_URL) != null ?
+        truncate(jobConf.getProperty(AZKABAN_WORKFLOW_URL), jobId) : "";
+
+    // For Azkaban, the URLs and IDs are the same
+    result.jobExecUrl = result.jobExecId;
+    result.jobDefUrl = result.jobDefId;
+    result.flowExecUrl = result.flowExecId;
+    result.flowDefUrl = result.flowDefId;
+
+    if (!result.jobExecId.isEmpty()) {
+      result.scheduler = "azkaban";
+      result.workflowDepth = 0;
+    }
+    result.jobName = jobConf.getProperty(AZKABAN_JOB_NAME) != null ? jobConf.getProperty(AZKABAN_JOB_NAME) : "";
+
+    // Truncate long job names
+    if (result.jobName.length() > 255) {
+      result.jobName = result.jobName.substring(0, 252) + "...";
+    }
   }
 
-  public static void retrieveURLs(JobResult result, SparkApplicationData appData) {
+  public static void retrieveURLs(AppResult result, SparkApplicationData appData) {
     String prop = appData.getEnvironmentData().getSparkProperty(SPARK_EXTRA_JAVA_OPTIONS);
+    String appId = appData.getAppId();
+
     if (prop != null) {
       try {
         Map<String, String> options = Utils.parseJavaOptions(prop);
@@ -76,10 +100,31 @@ public static void retrieveURLs(JobResult result, SparkApplicationData appData)
         }
         logger.info("Parsed options:" + StringUtils.join(s, ","));
 
-        result.jobExecUrl = unescapeString(options.get(AZKABAN_ATTEMPT_URL));
-        result.jobUrl = unescapeString(options.get(AZKABAN_JOB_URL));
-        result.flowExecUrl = unescapeString(options.get(AZKABAN_EXECUTION_URL));
-        result.flowUrl = unescapeString(options.get(AZKABAN_WORKFLOW_URL));
+        result.jobExecId = options.get(AZKABAN_ATTEMPT_URL) != null ?
+            truncate(unescapeString(options.get(AZKABAN_ATTEMPT_URL)), appId) : "";
+        result.jobDefId = options.get(AZKABAN_JOB_URL) != null ?
+            truncate(unescapeString(options.get(AZKABAN_JOB_URL)), appId) : "";
+        result.flowExecId = options.get(AZKABAN_EXECUTION_URL) != null ?
+            truncate(unescapeString(options.get(AZKABAN_EXECUTION_URL)), appId) : "";
+        result.flowDefId = options.get(AZKABAN_WORKFLOW_URL) != null ?
+            truncate(unescapeString(options.get(AZKABAN_WORKFLOW_URL)), appId) : "";
+
+        result.jobExecUrl = result.jobExecId;
+        result.jobDefUrl = result.jobDefId;
+        result.flowExecUrl = result.flowExecId;
+        result.flowDefUrl = result.flowDefId;
+
+        if (!result.jobExecId.isEmpty()) {
+          result.scheduler = "azkaban";
+          result.workflowDepth = 0;
+        }
+        result.jobName = options.get(AZKABAN_JOB_NAME) != null ? unescapeString(options.get(AZKABAN_JOB_NAME)) : "";
+
+        // Truncate long job names
+        if (result.jobName.length() > 255) {
+          result.jobName = result.jobName.substring(0, 252) + "...";
+        }
+
       } catch (IllegalArgumentException e) {
         logger.error("Encountered error while parsing java options into urls: " + e.getMessage());
       }
@@ -105,9 +150,9 @@ private static String unescapeString(String s) {
   }
 
   public static String truncate(String value, String jobId) {
-    if (value != null && value.length() > JobResult.URL_LEN_LIMIT) {
+    if (value != null && value.length() > AppResult.URL_LEN_LIMIT) {
       logger.info("Truncate long URL in job result for job: " + jobId + ". Original Url: " + value);
-      value = value.substring(0, JobResult.URL_LEN_LIMIT);
+      value = value.substring(0, AppResult.URL_LEN_LIMIT);
     }
     return value;
   }
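Reviewer note: job names longer than 255 characters are cut to 252 characters and suffixed
with "...", so the stored value is exactly 255 characters. A standalone sketch of that
truncation:

    String jobName = new String(new char[300]).replace('\0', 'a');  // hypothetical 300-char name
    if (jobName.length() > 255) {
      jobName = jobName.substring(0, 252) + "...";  // 252 chars + 3-char ellipsis == 255
    }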
diff --git a/app/com/linkedin/drelephant/util/Utils.java b/app/com/linkedin/drelephant/util/Utils.java
index 1ccbf1af4..d2066c9f1 100644
--- a/app/com/linkedin/drelephant/util/Utils.java
+++ b/app/com/linkedin/drelephant/util/Utils.java
@@ -16,13 +16,11 @@
 
 package com.linkedin.drelephant.util;
 
+import com.linkedin.drelephant.analysis.Severity;
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.ArrayList;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
-import java.util.regex.Pattern;
 import javax.script.ScriptEngine;
 import javax.script.ScriptEngineManager;
 import javax.script.ScriptException;
@@ -40,8 +38,6 @@
  */
 public final class Utils {
   private static final Logger logger = Logger.getLogger(Utils.class);
-  // Matching x.x.x or x.x.x-li1 (x are numbers)
-  public static final Pattern VERSION_PATTERN = Pattern.compile("(\\d+)(?:\\.\\d+)*(?:\\-[\\dA-Za-z]+)?");
 
   private Utils() {
     // do nothing
@@ -50,7 +46,7 @@ private Utils() {
   /**
    * Given a mapreduce job's application id, get its corresponding job id
    *
-   * Note: before adding Spark analysers, all JobResult were using job ids as the primary key. But Spark and many
+   * Note: before adding Spark analysers, all AppResults used job ids as the primary key. But Spark and many
    * other non-mapreduce applications do not have a job id. To maintain backwards compatibility, we replace
    * 'application' with 'job' to form a pseudo job id.
    *
@@ -122,71 +118,6 @@ public static Map<String, String> parseJavaOptions(String str) {
     return options;
   }
 
-  public static String combineCsvLines(String[] lines) {
-    StringBuilder sb = new StringBuilder();
-    for (String line : lines) {
-      sb.append(line).append("\n");
-    }
-    return sb.toString().trim();
-  }
-
-  public static String createCsvLine(String... parts) {
-    StringBuilder sb = new StringBuilder();
-    String quotes = "\"";
-    String comma = ",";
-    for (int i = 0; i < parts.length; i++) {
-      sb.append(quotes).append(parts[i].replaceAll(quotes, quotes + quotes)).append(quotes);
-      if (i != parts.length - 1) {
-        sb.append(comma);
-      }
-    }
-    return sb.toString();
-  }
-
-  public static String[][] parseCsvLines(String data) {
-    if (data.isEmpty()) {
-      return new String[0][];
-    }
-    String[] lines = data.split("\n");
-    String[][] result = new String[lines.length][];
-    for (int i = 0; i < lines.length; i++) {
-      result[i] = parseCsvLine(lines[i]);
-    }
-    return result;
-  }
-
-  public static String[] parseCsvLine(String line) {
-    List<String> store = new ArrayList<String>();
-    StringBuilder curVal = new StringBuilder();
-    boolean inquotes = false;
-    for (int i = 0; i < line.length(); i++) {
-      char ch = line.charAt(i);
-      if (inquotes) {
-        if (ch == '\"') {
-          inquotes = false;
-        } else {
-          curVal.append(ch);
-        }
-      } else {
-        if (ch == '\"') {
-          inquotes = true;
-          if (curVal.length() > 0) {
-            //if this is the second quote in a value, add a quote
-            //this is for the double quote in the middle of a value
-            curVal.append('\"');
-          }
-        } else if (ch == ',') {
-          store.add(curVal.toString());
-          curVal = new StringBuilder();
-        } else {
-          curVal.append(ch);
-        }
-      }
-    }
-    store.add(curVal.toString());
-    return store.toArray(new String[store.size()]);
-  }
-
   /**
    * Returns the configured thresholds after evaluating and verifying the levels.
    *
@@ -195,15 +126,16 @@ public static String[] parseCsvLine(String line) {
    * @return The evaluated threshold limits
    */
   public static double[] getParam(String rawLimits, int thresholdLevels) {
-    double[] parsedLimits = new double[thresholdLevels];
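+    // Start as null so that missing or malformed limits are reported as null to the caller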
+    double[] parsedLimits = null;
 
-    if (rawLimits != null) {
+    if (rawLimits != null && !rawLimits.isEmpty()) {
       String[] thresholds = rawLimits.split(",");
       if (thresholds.length != thresholdLevels) {
         logger.error("Could not find " + thresholdLevels + " threshold levels in "  + rawLimits);
         parsedLimits = null;
       } else {
         // Evaluate the limits
+        parsedLimits = new double[thresholdLevels];
         ScriptEngineManager mgr = new ScriptEngineManager();
         ScriptEngine engine = mgr.getEngineByName("JavaScript");
         for (int i = 0; i < thresholdLevels; i++) {
@@ -220,4 +152,47 @@ public static double[] getParam(String rawLimits, int thresholdLevels) {
     return parsedLimits;
   }
 
+  /**
+   * Combine the parts into a comma separated String
+   *
+   * Example:
+   * input: part1 = "foo" and part2 = "bar"
+   * output = "foo,bar"
+   *
+   * @param parts The parts to combine
+   * @return The comma separated string
+   */
+  public static String commaSeparated(String... parts) {
+    StringBuilder sb = new StringBuilder();
+    String comma = ",";
+    for (String part : parts) {
+      // Skip null and empty parts so the result never starts with, ends with, or doubles a comma
+      if (part == null || part.isEmpty()) {
+        continue;
+      }
+      if (sb.length() > 0) {
+        sb.append(comma);
+      }
+      sb.append(part);
+    }
+    return sb.toString();
+  }
+
+  /**
+   * Compute the score for the heuristic based on the number of tasks and severity.
+   * This is applicable only to mapreduce applications.
+   *
+   * Score = severity * num of tasks (where severity NOT in [NONE, LOW])
+   *
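+   * Example (assuming the default severity values, e.g. SEVERE = 3): a SEVERE heuristic over
+   * 1,000 tasks scores 3,000, while NONE and LOW always score 0.
+   *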
+   * @param severity The heuristic severity
+   * @param tasks The number of tasks (map/reduce)
+   * @return The computed heuristic score
+   */
+  public static int getHeuristicScore(Severity severity, int tasks) {
+    int score = 0;
+    if (severity != Severity.NONE && severity != Severity.LOW) {
+      score = severity.getValue() * tasks;
+    }
+    return score;
+  }
+
 }
diff --git a/app/controllers/Application.java b/app/controllers/Application.java
index 70b653b01..0990c5d9c 100644
--- a/app/controllers/Application.java
+++ b/app/controllers/Application.java
@@ -17,19 +17,19 @@
 package controllers;
 
 import com.avaje.ebean.ExpressionList;
-import com.avaje.ebean.RawSql;
-import com.avaje.ebean.RawSqlBuilder;
+import com.fasterxml.jackson.core.JsonGenerationException;
+import com.fasterxml.jackson.databind.JsonMappingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import com.linkedin.drelephant.ElephantContext;
 import com.linkedin.drelephant.analysis.Severity;
 import com.linkedin.drelephant.configurations.heuristic.HeuristicConfigurationData;
+import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
-import java.text.ParseException;
-import java.text.SimpleDateFormat;
 import java.util.ArrayList;
-import java.util.Calendar;
+import java.util.Comparator;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
@@ -37,9 +37,9 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TimeZone;
-import model.JobHeuristicResult;
-import model.JobResult;
+import java.util.TreeSet;
+import models.AppHeuristicResult;
+import models.AppResult;
 import org.apache.http.client.utils.URLEncodedUtils;
 import org.apache.http.message.BasicNameValuePair;
 import org.apache.log4j.Logger;
@@ -78,11 +78,11 @@ public class Application extends Controller {
   private static final int MAX_HISTORY_LIMIT = 15;            // Upper limit on the number of executions to display
 
   // Form and Rest parameters
-  private static final String JOB_ID = "id";
-  private static final String FLOW_URL = "flow-url";
-  private static final String FLOW_EXEC_URL = "flow-exec-url";
-  private static final String JOB_URL = "job-url";
-  private static final String USER = "user";
+  private static final String APP_ID = "id";
+  private static final String FLOW_DEF_ID = "flow-def-id";
+  private static final String FLOW_EXEC_ID = "flow-exec-id";
+  private static final String JOB_DEF_ID = "job-def-id";
+  private static final String USERNAME = "username";
   private static final String SEVERITY = "severity";
   private static final String JOB_TYPE = "job-type";
   private static final String ANALYSIS = "analysis";
@@ -90,14 +90,10 @@ public class Application extends Controller {
   private static final String STARTED_TIME_END = "started-time-end";
   private static final String FINISHED_TIME_BEGIN = "finished-time-begin";
   private static final String FINISHED_TIME_END = "finished-time-end";
-  private static final String COMPARE_FLOW_URL1 = "flow-exec-url1";
-  private static final String COMPARE_FLOW_URL2 = "flow-exec-url2";
+  private static final String COMPARE_FLOW_ID1 = "flow-exec-id1";
+  private static final String COMPARE_FLOW_ID2 = "flow-exec-id2";
   private static final String PAGE = "page";
 
-  // Time range specifier. [TIME_RANGE_BEGIN, TIME_RANGE_END]
-  private static final boolean TIME_RANGE_BEGIN = false;
-  private static final boolean TIME_RANGE_END = true;
-
   private static long _lastFetch = 0;
   private static int _numJobsAnalyzed = 0;
   private static int _numJobsCritical = 0;
@@ -115,23 +111,39 @@ public class Application extends Controller {
   }
 
   /**
-   * Controls the Home page of Dr. Elephant
+   * Controls the Home page of Dr. Elephant.
+   *
+   * Displays the latest jobs which were analysed in the last 24 hours.
    */
   public static Result dashboard() {
     long now = System.currentTimeMillis();
+    Date finishDate = new Date(now - DAY);
+
+    // Update statistics only after FETCH_DELAY
     if (now - _lastFetch > FETCH_DELAY) {
-      _numJobsAnalyzed = JobResult.find.where().gt(JobResult.TABLE.ANALYSIS_TIME, now - DAY).findRowCount();
-      _numJobsCritical =
-          JobResult.find.where().gt(JobResult.TABLE.ANALYSIS_TIME, now - DAY)
-              .eq(JobResult.TABLE.SEVERITY, Severity.CRITICAL.getValue()).findRowCount();
-      _numJobsSevere =
-          JobResult.find.where().gt(JobResult.TABLE.ANALYSIS_TIME, now - DAY)
-              .eq(JobResult.TABLE.SEVERITY, Severity.SEVERE.getValue()).findRowCount();
+      _numJobsAnalyzed = AppResult.find.where()
+          .gt(AppResult.TABLE.FINISH_TIME, finishDate)
+          .findRowCount();
+      _numJobsCritical = AppResult.find.where()
+          .gt(AppResult.TABLE.FINISH_TIME, finishDate)
+          .eq(AppResult.TABLE.SEVERITY, Severity.CRITICAL.getValue())
+          .findRowCount();
+      _numJobsSevere = AppResult.find.where()
+          .gt(AppResult.TABLE.FINISH_TIME, finishDate)
+          .eq(AppResult.TABLE.SEVERITY, Severity.SEVERE.getValue())
+          .findRowCount();
       _lastFetch = now;
     }
-    List<JobResult> results =
-        JobResult.find.where().gt(JobResult.TABLE.ANALYSIS_TIME, now - DAY).order().desc(JobResult.TABLE.ANALYSIS_TIME)
-            .setMaxRows(50).fetch("heuristicResults").findList();
+
+    // Fetch only the required fields for jobs analysed in the last 24 hours, up to a maximum of 50 jobs
+    List<AppResult> results = AppResult.find
+        .select(AppResult.getSearchFields())
+        .where()
+        .gt(AppResult.TABLE.FINISH_TIME, finishDate)
+        .order().desc(AppResult.TABLE.FINISH_TIME)
+        .setMaxRows(50)
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+        .findList();
 
     return ok(homePage.render(_numJobsAnalyzed, _numJobsSevere, _numJobsCritical,
         searchResults.render("Latest analysis", results)));
@@ -142,26 +154,38 @@ public static Result dashboard() {
    */
   public static Result search() {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String jobId = form.get(JOB_ID);
-    jobId = jobId != null ? jobId.trim() : "";
-    String flowUrl = form.get(FLOW_URL);
-    flowUrl = (flowUrl != null) ? flowUrl.trim() : null;
-
-    // Search and display job information when job id or flow execution url is provided.
-    if (!jobId.isEmpty()) {
-      JobResult result = JobResult.find.byId(jobId);
+    String appId = form.get(APP_ID);
+    appId = appId != null ? appId.trim() : "";
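+    // Mapreduce job ids differ from application ids only in the prefix; normalize before lookup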
+    if (appId.contains("job")) {
+      appId = appId.replaceAll("job", "application");
+    }
+    String flowExecId = form.get(FLOW_EXEC_ID);
+    flowExecId = (flowExecId != null) ? flowExecId.trim() : null;
+
+    // Search and display job details when a job id or flow execution id is provided.
+    if (!appId.isEmpty()) {
+      AppResult result = AppResult.find.select("*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+              + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+          .where()
+          .idEq(appId).findUnique();
       if (result != null) {
         return ok(searchPage.render(null, jobDetails.render(result)));
       } else {
         return ok(searchPage.render(null, jobDetails.render(null)));
       }
-    } else if (flowUrl != null && !flowUrl.isEmpty()) {
-      List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.FLOW_EXEC_URL, flowUrl).findList();
-      Map<String, List<JobResult>> map = groupJobs(results, GroupBy.JOB_EXECUTION_URL);
-      return ok(searchPage.render(null, flowDetails.render(flowUrl, map)));
+    } else if (flowExecId != null && !flowExecId.isEmpty()) {
+      List<AppResult> results = AppResult.find
+          .select(AppResult.getSearchFields() + "," + AppResult.TABLE.JOB_EXEC_ID)
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+          .where().eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId)
+          .findList();
+      Map<IdUrlPair, List<AppResult>> map = groupJobs(results, GroupBy.JOB_EXECUTION_ID);
+      return ok(searchPage.render(null, flowDetails.render(flowExecId, map)));
     }
 
-    // Paginate the results
+    // Prepare pagination of results
     PaginationStats paginationStats = new PaginationStats(PAGE_LENGTH, PAGE_BAR_LENGTH);
     int pageLength = paginationStats.getPageLength();
     paginationStats.setCurrentPage(1);
@@ -178,20 +202,22 @@ public static Result search() {
     int paginationBarStartIndex = paginationStats.getPaginationBarStartIndex();
 
     // Filter jobs by search parameters
-    ExpressionList<JobResult> query = generateQuery();
-    List<JobResult> results =
-        query.order().desc(JobResult.TABLE.ANALYSIS_TIME).setFirstRow((paginationBarStartIndex - 1) * pageLength)
-            .setMaxRows((paginationStats.getPageBarLength() - 1) * pageLength + 1).findList();
+    ExpressionList<AppResult> searchQuery = AppResult.find.select(AppResult.getSearchFields()).where();
+    ExpressionList<AppResult> query = generateSearchQuery(searchQuery);
+    List<AppResult> results = query
+        .order().desc(AppResult.TABLE.FINISH_TIME)
+        .setFirstRow((paginationBarStartIndex - 1) * pageLength)
+        .setMaxRows((paginationStats.getPageBarLength() - 1) * pageLength + 1)
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+        .findList();
     paginationStats.setQueryString(getQueryString());
     if (results.isEmpty() || currentPage > paginationStats.computePaginationBarEndIndex(results.size())) {
       return ok(searchPage.render(null, jobDetails.render(null)));
     } else {
-      return ok(searchPage.render(
-          paginationStats,
-          searchResults.render(
-              "Results",
-              results.subList((currentPage - paginationBarStartIndex) * pageLength,
-                  Math.min(results.size(), (currentPage - paginationBarStartIndex + 1) * pageLength)))));
+      return ok(searchPage.render(paginationStats,
+          searchResults.render("Results",
+              results.subList((currentPage - paginationBarStartIndex) * pageLength, Math.min(results.size(),
+                  (currentPage - paginationBarStartIndex + 1) * pageLength)))));
     }
   }
 
@@ -218,13 +244,13 @@ private static String getQueryString() {
   }
 
   /**
-   * Build SQL expression
+   * Build the SQL predicates for the search query
    *
-   * @return An sql expression on Job Result
+   * @param query The expression list to which the search predicates are added
+   * @return An SQL expression list on AppResult with the predicates applied
    */
-  private static ExpressionList<JobResult> generateQuery() {
+  private static ExpressionList<AppResult> generateSearchQuery(ExpressionList<AppResult> query) {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String username = form.get(USER);
+    String username = form.get(USERNAME);
     username = username != null ? username.trim().toLowerCase() : null;
     String severity = form.get(SEVERITY);
     String jobType = form.get(JOB_TYPE);
@@ -234,37 +260,19 @@ private static ExpressionList<JobResult> generateQuery() {
     String startedTimeBegin = form.get(STARTED_TIME_BEGIN);
     String startedTimeEnd = form.get(STARTED_TIME_END);
 
-    ExpressionList<JobResult> query = JobResult.find.where();
-
-    RawSql rawsql = null;
-    // Hint usage of username index to mysql whenever our query contains a predicate on username
-    if (isSet(severity) && isSet(analysis)) {
-      if (isSet(username)) {
-        rawsql = RawSqlBuilder.parse(QueryHandler.getSqlJoinQueryWithUsernameIndex().toString()).create();
-      } else {
-        rawsql = RawSqlBuilder.parse(QueryHandler.getSqlJoinQuery().toString()).create();
-      }
-    } else {
-      if (isSet(username)) {
-        rawsql = RawSqlBuilder.parse(QueryHandler.getJobResultQueryWithUsernameIndex().toString()).create();
-      }
-    }
-    query = query.query().setRawSql(rawsql).where();
-
     // Build predicates
     if (isSet(username)) {
-      query = query.like(JobResult.TABLE.USERNAME, username);
+      query = query.eq(AppResult.TABLE.USERNAME, username);
     }
     if (isSet(jobType)) {
-      query = query.eq(JobResult.TABLE.JOB_TYPE, jobType);
+      query = query.eq(AppResult.TABLE.JOB_TYPE, jobType);
     }
     if (isSet(severity)) {
       if (isSet(analysis)) {
-        query =
-            query.eq(JobHeuristicResult.TABLE.TABLE_NAME + "." + JobHeuristicResult.TABLE.ANALYSIS_NAME, analysis).ge(
-                JobHeuristicResult.TABLE.TABLE_NAME + "." + JobHeuristicResult.TABLE.SEVERITY, severity);
+        query = query.eq(AppResult.TABLE.APP_HEURISTIC_RESULTS + "." + AppHeuristicResult.TABLE.HEURISTIC_NAME, analysis)
+            .ge(AppResult.TABLE.APP_HEURISTIC_RESULTS + "." + AppHeuristicResult.TABLE.SEVERITY, severity);
       } else {
-        query = query.ge(JobResult.TABLE.SEVERITY, severity);
+        query = query.ge(AppResult.TABLE.SEVERITY, severity);
       }
     }
 
@@ -272,25 +280,25 @@ private static ExpressionList<JobResult> generateQuery() {
     if (isSet(startedTimeBegin)) {
       long time = parseTime(startedTimeBegin);
       if (time > 0) {
-        query = query.ge(JobResult.TABLE.ANALYSIS_TIME, time);
+        query = query.ge(AppResult.TABLE.FINISH_TIME, new Date(time));
       }
     }
     if (isSet(startedTimeEnd)) {
       long time = parseTime(startedTimeEnd);
       if (time > 0) {
-        query = query.le(JobResult.TABLE.ANALYSIS_TIME, time);
+        query = query.le(AppResult.TABLE.FINISH_TIME, new Date(time));
       }
     }
     if (isSet(finishedTimeBegin)) {
       long time = parseTime(finishedTimeBegin);
       if (time > 0) {
-        query = query.ge(JobResult.TABLE.ANALYSIS_TIME, time);
+        query = query.ge(AppResult.TABLE.FINISH_TIME, new Date(time));
       }
     }
     if (isSet(finishedTimeEnd)) {
       long time = parseTime(finishedTimeEnd);
       if (time > 0) {
-        query = query.le(JobResult.TABLE.ANALYSIS_TIME, time);
+        query = query.le(AppResult.TABLE.FINISH_TIME, new Date(time));
       }
     }
 
@@ -302,41 +310,74 @@ private static ExpressionList<JobResult> generateQuery() {
    */
   public static Result compare() {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String flowExecUrl1 = form.get(COMPARE_FLOW_URL1);
-    flowExecUrl1 = (flowExecUrl1 != null) ? flowExecUrl1.trim() : null;
-    String flowExecUrl2 = form.get(COMPARE_FLOW_URL2);
-    flowExecUrl2 = (flowExecUrl2 != null) ? flowExecUrl2.trim() : null;
-    return ok(comparePage.render(compareResults.render(compareFlows(flowExecUrl1, flowExecUrl2))));
+    String flowExecId1 = form.get(COMPARE_FLOW_ID1);
+    flowExecId1 = (flowExecId1 != null) ? flowExecId1.trim() : null;
+    String flowExecId2 = form.get(COMPARE_FLOW_ID2);
+    flowExecId2 = (flowExecId2 != null) ? flowExecId2.trim() : null;
+
+    List<AppResult> results1 = null;
+    List<AppResult> results2 = null;
+    if (flowExecId1 != null && !flowExecId1.isEmpty() && flowExecId2 != null && !flowExecId2.isEmpty()) {
+       results1 = AppResult.find
+          .select(AppResult.getSearchFields() + "," + AppResult.TABLE.JOB_DEF_ID + "," + AppResult.TABLE.JOB_DEF_URL
+              + "," + AppResult.TABLE.FLOW_EXEC_ID + "," + AppResult.TABLE.FLOW_EXEC_URL)
+          .where().eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId1).setMaxRows(100)
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+          .findList();
+       results2 = AppResult.find
+          .select(
+              AppResult.getSearchFields() + "," + AppResult.TABLE.JOB_DEF_ID + "," + AppResult.TABLE.JOB_DEF_URL + ","
+                  + AppResult.TABLE.FLOW_EXEC_ID + "," + AppResult.TABLE.FLOW_EXEC_URL)
+          .where().eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId2).setMaxRows(100)
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+          .findList();
+    }
+    return ok(comparePage.render(compareResults.render(compareFlows(results1, results2))));
   }
 
   /**
    * Helper Method for the compare controller.
    * This Compares 2 flow executions at job level.
    *
-   * @param flowExecUrl1 The flow execution url to be compared
-   * @param flowExecUrl2 The other flow execution url to be compared against
+   * @param results1 The list of jobs under flow execution 1
+   * @param results2 The list of jobs under flow execution 2
    * @return A map of Job Urls to the list of jobs corresponding to the 2 flow execution urls
    */
-  private static Map<String, Map<String, List<JobResult>>> compareFlows(String flowExecUrl1, String flowExecUrl2) {
-    Map<String, Map<String, List<JobResult>>> jobDefMap = new HashMap<String, Map<String, List<JobResult>>>();
-
-    if (flowExecUrl1 != null && !flowExecUrl1.isEmpty() && flowExecUrl2 != null && !flowExecUrl2.isEmpty()) {
-      List<JobResult> results1 = JobResult.find.where().eq(JobResult.TABLE.FLOW_EXEC_URL, flowExecUrl1).findList();
-      List<JobResult> results2 = JobResult.find.where().eq(JobResult.TABLE.FLOW_EXEC_URL, flowExecUrl2).findList();
-
-      Map<String, List<JobResult>> map1 = groupJobs(results1, GroupBy.JOB_DEFINITION_URL);
-      Map<String, List<JobResult>> map2 = groupJobs(results2, GroupBy.JOB_DEFINITION_URL);
-
-      // We want to display jobs that are common to the two flows first and then display jobs in flow 1 and flow 2.
-      Set<String> CommonFlows = Sets.intersection(map1.keySet(), map2.keySet());
-      Set<String> orderedFlowSet = Sets.union(CommonFlows, map1.keySet());
-      Set<String> union = Sets.union(orderedFlowSet, map2.keySet());
-
-      for (String jobDefUrl : union) {
-        Map<String, List<JobResult>> flowExecMap = new LinkedHashMap<String, List<JobResult>>();
-        flowExecMap.put(flowExecUrl1, map1.get(jobDefUrl));
-        flowExecMap.put(flowExecUrl2, map2.get(jobDefUrl));
-        jobDefMap.put(jobDefUrl, flowExecMap);
+  private static Map<IdUrlPair, Map<IdUrlPair, List<AppResult>>> compareFlows(List<AppResult> results1,
+      List<AppResult> results2) {
+    Map<IdUrlPair, Map<IdUrlPair, List<AppResult>>> jobDefMap = new HashMap<IdUrlPair, Map<IdUrlPair, List<AppResult>>>();
+
+    if (results1 != null && !results1.isEmpty() && results2 != null && !results2.isEmpty()) {
+
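+      // All results within a flow execution share the same flow exec id/url, so read them off
+      // the first result of each list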
+      IdUrlPair flow1 = new IdUrlPair(results1.get(0).flowExecId, results1.get(0).flowExecUrl);
+      IdUrlPair flow2 = new IdUrlPair(results2.get(0).flowExecId, results2.get(0).flowExecUrl);
+
+      Map<IdUrlPair, List<AppResult>> map1 = groupJobs(results1, GroupBy.JOB_DEFINITION_ID);
+      Map<IdUrlPair, List<AppResult>> map2 = groupJobs(results2, GroupBy.JOB_DEFINITION_ID);
+
+      final Comparator<IdUrlPair> idComparator = new Comparator<IdUrlPair>() {
+        public int compare(final IdUrlPair o1, final IdUrlPair o2) {
+          return o1.getId().compareToIgnoreCase(o2.getId());
+        }
+      };
+      final Set<IdUrlPair> group1 = new TreeSet<IdUrlPair>(idComparator);
+      group1.addAll(map1.keySet());
+      final Set<IdUrlPair> group2 = new TreeSet<IdUrlPair>(idComparator);
+      group2.addAll(map2.keySet());
+
+      // Display jobs that are common to the two flows first followed by jobs in flow 1 and flow 2.
+      Set<IdUrlPair> commonJobs = Sets.intersection(group1, group2);
+      Set<IdUrlPair> orderedFlowSet = Sets.union(commonJobs, group1);
+      Set<IdUrlPair> union = Sets.union(orderedFlowSet, group2);
+
+      for (IdUrlPair pair : union) {
+        Map<IdUrlPair, List<AppResult>> flowExecMap = new LinkedHashMap<IdUrlPair, List<AppResult>>();
+        flowExecMap.put(flow1, map1.get(pair));
+        flowExecMap.put(flow2, map2.get(pair));
+        jobDefMap.put(pair, flowExecMap);
       }
     }
     return jobDefMap;
@@ -347,43 +388,55 @@ private static Map<String, Map<String, List<JobResult>>> compareFlows(String flo
    */
   public static Result flowHistory() {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String flowUrl = form.get(FLOW_URL);
-    flowUrl = (flowUrl != null) ? flowUrl.trim() : null;
-    if (flowUrl == null || flowUrl.isEmpty()) {
+    String flowDefId = form.get(FLOW_DEF_ID);
+    flowDefId = (flowDefId != null) ? flowDefId.trim() : null;
+    if (flowDefId == null || flowDefId.isEmpty()) {
       return ok(flowHistoryPage.render(flowHistoryResults.render(null, null, null, null)));
     }
 
     // Fetch available flow executions with latest JOB_HISTORY_LIMIT mr jobs.
-    List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.FLOW_URL, flowUrl).order()
-        .desc(JobResult.TABLE.ANALYSIS_TIME).setMaxRows(JOB_HISTORY_LIMIT).findList();
+    List<AppResult> results = AppResult.find
+        .select(
+            AppResult.getSearchFields() + "," + AppResult.TABLE.FLOW_EXEC_ID + "," + AppResult.TABLE.FLOW_EXEC_URL + ","
+                + AppResult.TABLE.JOB_DEF_ID + "," + AppResult.TABLE.JOB_DEF_URL + "," + AppResult.TABLE.JOB_NAME)
+        .where().eq(AppResult.TABLE.FLOW_DEF_ID, flowDefId)
+        .order().desc(AppResult.TABLE.FINISH_TIME)
+        .setMaxRows(JOB_HISTORY_LIMIT)
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+        .findList();
     if (results.size() == 0) {
-      return notFound("Unable to find record on flow url: " + flowUrl);
+      return notFound("Unable to find record on flow url: " + flowDefId);
     }
-    Map<String, List<JobResult>> flowExecUrlToJobsMap =  limitHistoryResults(
-        groupJobs(results, GroupBy.FLOW_EXECUTION_URL), results.size(), MAX_HISTORY_LIMIT);
+    Map<IdUrlPair, List<AppResult>> flowExecIdToJobsMap =  limitHistoryResults(
+        groupJobs(results, GroupBy.FLOW_EXECUTION_ID), results.size(), MAX_HISTORY_LIMIT);
 
     // Compute flow execution data
-    List<JobResult> filteredResults = new ArrayList<JobResult>();     // All mr jobs starting from latest execution
-    List<Long> flowExecTimeList = new ArrayList<Long>();              // To map executions to resp execution time
-    Map<String, Map<String, List<JobResult>>> executionMap = new LinkedHashMap<String, Map<String, List<JobResult>>>();
-    for (Map.Entry<String, List<JobResult>> entry: flowExecUrlToJobsMap.entrySet()) {
+    List<AppResult> filteredResults = new ArrayList<AppResult>();     // All jobs starting from latest execution
+    List<Long> flowExecTimeList = new ArrayList<Long>();          // To map executions to their respective execution times
+    Map<IdUrlPair, Map<IdUrlPair, List<AppResult>>> executionMap =
+        new LinkedHashMap<IdUrlPair, Map<IdUrlPair, List<AppResult>>>();
+    for (Map.Entry<IdUrlPair, List<AppResult>> entry: flowExecIdToJobsMap.entrySet()) {
 
-      // Reverse the list content from desc order of analysis_time to increasing order so that when grouping we get
+      // Reverse the list content from desc order of finish time to increasing order so that when grouping we get
       // the job list in the order of completion.
-      List<JobResult> mrJobsList = Lists.reverse(entry.getValue());
+      List<AppResult> mrJobsList = Lists.reverse(entry.getValue());
 
-      // Flow exec time is the analysis_time of the last mr job in the flow
-      flowExecTimeList.add(mrJobsList.get(mrJobsList.size() - 1).analysisTime);
+      // Flow exec time is the finish time of the last mr job in the flow
+      flowExecTimeList.add(mrJobsList.get(mrJobsList.size() - 1).finishTime.getTime());
 
       filteredResults.addAll(mrJobsList);
-      executionMap.put(entry.getKey(), groupJobs(mrJobsList, GroupBy.JOB_DEFINITION_URL));
+      executionMap.put(entry.getKey(), groupJobs(mrJobsList, GroupBy.JOB_DEFINITION_ID));
     }
 
     // Calculate unique list of jobs (job def url) to maintain order across executions. List will contain job def urls
     // from latest execution first followed by any other extra job def url that may appear in previous executions.
-    List<String> jobDefUrlList = new ArrayList<String>(groupJobs(filteredResults, GroupBy.JOB_DEFINITION_URL).keySet());
+    Map<IdUrlPair, String> idPairToJobNameMap = new HashMap<IdUrlPair, String>();
+    Map<IdUrlPair, List<AppResult>> filteredMap = groupJobs(filteredResults, GroupBy.JOB_DEFINITION_ID);
+    for (Map.Entry<IdUrlPair, List<AppResult>> entry: filteredMap.entrySet()) {
+      idPairToJobNameMap.put(entry.getKey(), entry.getValue().get(0).jobName);
+    }
 
-    return ok(flowHistoryPage.render(flowHistoryResults.render(flowUrl, executionMap, jobDefUrlList,
+    return ok(flowHistoryPage.render(flowHistoryResults.render(flowDefId, executionMap, idPairToJobNameMap,
         flowExecTimeList)));
   }
 
@@ -392,44 +445,50 @@ public static Result flowHistory() {
    */
   public static Result jobHistory() {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String jobUrl = form.get(JOB_URL);
-    jobUrl = (jobUrl != null) ? jobUrl.trim() : null;
-    if (jobUrl == null || jobUrl.isEmpty()) {
+    String jobDefId = form.get(JOB_DEF_ID);
+    jobDefId = (jobDefId != null) ? jobDefId.trim() : null;
+    if (jobDefId == null || jobDefId.isEmpty()) {
       return ok(jobHistoryPage.render(jobHistoryResults.render(null, null, -1, null)));
     }
 
     // Fetch all job executions
-    List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.JOB_URL, jobUrl).order()
-        .desc(JobResult.TABLE.ANALYSIS_TIME).setMaxRows(JOB_HISTORY_LIMIT).findList();
+    List<AppResult> results = AppResult.find
+        .select(AppResult.getSearchFields() + "," + AppResult.TABLE.FLOW_EXEC_ID + "," + AppResult.TABLE.FLOW_EXEC_URL)
+        .where().eq(AppResult.TABLE.JOB_DEF_ID, jobDefId)
+        .order().desc(AppResult.TABLE.FINISH_TIME).setMaxRows(JOB_HISTORY_LIMIT)
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+            + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+        .findList();
     if (results.size() == 0) {
-      return notFound("Unable to find record on job url: " + jobUrl);
+      return notFound("Unable to find record on job url: " + jobDefId);
     }
-    Map<String, List<JobResult>> flowExecUrlToJobsMap =
-        limitHistoryResults(groupJobs(results, GroupBy.FLOW_EXECUTION_URL), results.size(), MAX_HISTORY_LIMIT);
+    Map<IdUrlPair, List<AppResult>> flowExecIdToJobsMap =
+        limitHistoryResults(groupJobs(results, GroupBy.FLOW_EXECUTION_ID), results.size(), MAX_HISTORY_LIMIT);
 
     // Compute job execution data
     List<Long> flowExecTimeList = new ArrayList<Long>();
     int maxStages = 0;
-    Map<String, List<JobResult>> executionMap = new LinkedHashMap<String, List<JobResult>>();
-    for (Map.Entry<String, List<JobResult>> entry: flowExecUrlToJobsMap.entrySet()) {
+    Map<IdUrlPair, List<AppResult>> executionMap = new LinkedHashMap<IdUrlPair, List<AppResult>>();
+    for (Map.Entry<IdUrlPair, List<AppResult>> entry: flowExecIdToJobsMap.entrySet()) {
 
-      // Reverse the list content from desc order of analysis_time to increasing order so that when grouping we get
+      // Reverse the list content from desc order of finish time to increasing order so that when grouping we get
       // the job list in the order of completion.
-      List<JobResult> mrJobsList = Lists.reverse(entry.getValue());
+      List<AppResult> mrJobsList = Lists.reverse(entry.getValue());
 
-      // Get the analysis_time of the last mr job that completed in current flow.
-      flowExecTimeList.add(mrJobsList.get(mrJobsList.size() - 1).analysisTime);
+      // Get the finish time of the last mr job that completed in current flow.
+      flowExecTimeList.add(mrJobsList.get(mrJobsList.size() - 1).finishTime.getTime());
 
       // Find the maximum number of mr stages for any job execution
-      int stageSize = flowExecUrlToJobsMap.get(entry.getKey()).size();
+      int stageSize = flowExecIdToJobsMap.get(entry.getKey()).size();
       if (stageSize > maxStages) {
         maxStages = stageSize;
       }
 
-      executionMap.put(entry.getKey(), Lists.reverse(flowExecUrlToJobsMap.get(entry.getKey())));
+      executionMap.put(entry.getKey(), Lists.reverse(flowExecIdToJobsMap.get(entry.getKey())));
     }
 
-    return ok(jobHistoryPage.render(jobHistoryResults.render(jobUrl, executionMap, maxStages, flowExecTimeList)));
+    return ok(jobHistoryPage.render(jobHistoryResults.render(jobDefId, executionMap, maxStages, flowExecTimeList)));
   }
 
   /**
@@ -449,16 +508,16 @@ public static Result jobHistory() {
    * @param execLimit The upper limit on the number of executions to be displayed.
    * @return A map after applying the limit.
    */
-  private static Map<String, List<JobResult>> limitHistoryResults(Map<String, List<JobResult>> map, int size,
-      int execLimit) {
-    Map<String, List<JobResult>> resultMap = new LinkedHashMap<String, List<JobResult>>();
+  private static Map<IdUrlPair, List<AppResult>> limitHistoryResults(Map<IdUrlPair, List<AppResult>> map,
+      int size, int execLimit) {
+    Map<IdUrlPair, List<AppResult>> resultMap = new LinkedHashMap<IdUrlPair, List<AppResult>>();
 
     int limit;
     if (size < JOB_HISTORY_LIMIT) {
       // No pruning needed. 100% correct.
       limit = execLimit;
     } else {
-      Set<String> keySet = map.keySet();
+      Set<IdUrlPair> keySet = map.keySet();
       if (keySet.size() > 10) {
         // Prune last 3 executions
         limit = keySet.size() > (execLimit + 3) ? execLimit : keySet.size() - 3;
@@ -470,7 +529,7 @@ private static Map<String, List<JobResult>> limitHistoryResults(Map<String, List
 
     // Filtered results
     int i = 1;
-    for (Map.Entry<String, List<JobResult>> entry : map.entrySet()) {
+    for (Map.Entry<IdUrlPair, List<AppResult>> entry : map.entrySet()) {
       if (i > limit) {
         break;
       }
@@ -561,35 +620,45 @@ private static boolean isSet(String property) {
 
   /**
    * Rest API for searching a particular job information
+   * E.g., localhost:8080/rest/job?id=xyz
    */
-  public static Result restJobResult(String jobId) {
+  public static Result restAppResult(String id) {
 
-    if (jobId == null || jobId.isEmpty()) {
+    if (id == null || id.isEmpty()) {
       return badRequest("No job id provided.");
     }
-
-    JobResult result = JobResult.find.byId(jobId);
-
-    if (result == null) {
-      return notFound("Unable to find record on job id: " + jobId);
+    if (id.contains("job")) {
+      id = id.replaceAll("job", "application");
     }
 
+    AppResult result = AppResult.find.select("*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+            + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+        .where()
+        .idEq(id).findUnique();
+
     return ok(Json.toJson(result));
   }
 
   /**
    * Rest API for searching all jobs triggered by a particular Scheduler Job
+   * E.g., localhost:8080/rest/jobexec?id=xyz
    */
-  public static Result restJobExecResult(String jobExecUrl) {
+  public static Result restJobExecResult(String jobExecId) {
 
-    if (jobExecUrl == null || jobExecUrl.isEmpty()) {
+    if (jobExecId == null || jobExecId.isEmpty()) {
       return badRequest("No job exec url provided.");
     }
 
-    List<JobResult> result = JobResult.find.where().eq(JobResult.TABLE.JOB_EXEC_URL, jobExecUrl).findList();
+    List<AppResult> result = AppResult.find.select("*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "." + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+        .where().eq(AppResult.TABLE.JOB_EXEC_ID, jobExecId)
+        .findList();
 
     if (result.size() == 0) {
-      return notFound("Unable to find record on job exec url: " + jobExecUrl);
+      return notFound("Unable to find record on job exec url: " + jobExecId);
     }
 
     return ok(Json.toJson(result));
@@ -597,63 +666,91 @@ public static Result restJobExecResult(String jobExecUrl) {
 
   /**
    * Rest API for searching all jobs under a particular flow execution
+   * E.g., localhost:8080/rest/flowexec?id=xyz
    */
-  public static Result restFlowExecResult(String flowExecUrl) {
+  public static Result restFlowExecResult(String flowExecId) {
 
-    if (flowExecUrl == null || flowExecUrl.isEmpty()) {
+    if (flowExecId == null || flowExecId.isEmpty()) {
       return badRequest("No flow exec url provided.");
     }
 
-    List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.FLOW_EXEC_URL, flowExecUrl).findList();
+    List<AppResult> results = AppResult.find.select("*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "." + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+        .where().eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId)
+        .findList();
 
     if (results.size() == 0) {
-      return notFound("Unable to find record on flow exec url: " + flowExecUrl);
+      return notFound("Unable to find record on flow exec url: " + flowExecId);
     }
 
-    Map<String, List<JobResult>> resMap = groupJobs(results, GroupBy.JOB_EXECUTION_URL);
+    Map<IdUrlPair, List<AppResult>> groupMap = groupJobs(results, GroupBy.JOB_EXECUTION_ID);
+
+    Map<String, List<AppResult>> resMap = new HashMap<String, List<AppResult>>();
+    for (Map.Entry<IdUrlPair, List<AppResult>> entry : groupMap.entrySet()) {
+      IdUrlPair jobExecPair = entry.getKey();
+      List<AppResult> value = entry.getValue();
+      resMap.put(jobExecPair.getId(), value);
+    }
 
     return ok(Json.toJson(resMap));
   }
 
   static enum GroupBy {
-    JOB_EXECUTION_URL,
-    JOB_DEFINITION_URL,
-    FLOW_EXECUTION_URL
+    JOB_EXECUTION_ID,
+    JOB_DEFINITION_ID,
+    FLOW_EXECUTION_ID
   }
 
   /**
-   * Grouping a list of JobResult by GroupBy enum.
+   * Groups a list of AppResult by the field specified in the GroupBy enum.
    *
-   * @param results The list of jobs of type JobResult to be grouped.
+   * @param results The list of jobs of type AppResult to be grouped.
    * @param groupBy The field by which the results have to be grouped.
    * @return A map with the grouped field as the key and the list of jobs as the value.
    */
-  private static Map<String, List<JobResult>> groupJobs(List<JobResult> results, GroupBy groupBy) {
-
-    Map<String, List<JobResult>> resultMap = new LinkedHashMap<String, List<JobResult>>();
+  private static Map<IdUrlPair, List<AppResult>> groupJobs(List<AppResult> results, GroupBy groupBy) {
+    Map<String, List<AppResult>> groupMap = new LinkedHashMap<String, List<AppResult>>();
+    Map<String, String> idUrlMap = new HashMap<String, String>();
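+    // First pass: group results by the raw id while remembering the first url seen for each id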
 
-    for (JobResult result : results) {
-      String field = null;
+    for (AppResult result : results) {
+      String idField = null;
+      String urlField = null;
       switch (groupBy) {
-        case JOB_EXECUTION_URL:
-          field = result.jobExecUrl;
+        case JOB_EXECUTION_ID:
+          idField = result.jobExecId;
+          urlField = result.jobExecUrl;
           break;
-        case JOB_DEFINITION_URL:
-          field = result.jobUrl;
+        case JOB_DEFINITION_ID:
+          idField = result.jobDefId;
+          urlField = result.jobDefUrl;
           break;
-        case FLOW_EXECUTION_URL:
-          field = result.flowExecUrl;
+        case FLOW_EXECUTION_ID:
+          idField = result.flowExecId;
+          urlField = result.flowExecUrl;
           break;
       }
+      if (!idUrlMap.containsKey(idField)) {
+        idUrlMap.put(idField, urlField);
+      }
 
-      if (resultMap.containsKey(field)) {
-        resultMap.get(field).add(result);
+      if (groupMap.containsKey(idField)) {
+        groupMap.get(idField).add(result);
       } else {
-        List<JobResult> list = new ArrayList<JobResult>();
+        List<AppResult> list = new ArrayList<AppResult>();
         list.add(result);
-        resultMap.put(field, list);
+        groupMap.put(idField, list);
       }
     }
+
+    // Second pass: construct the final result map keyed by an (id, url) pair.
+    Map<IdUrlPair, List<AppResult>> resultMap = new LinkedHashMap<IdUrlPair, List<AppResult>>();
+    for (Map.Entry<String, List<AppResult>> entry : groupMap.entrySet()) {
+      String key = entry.getKey();
+      List<AppResult> value = entry.getValue();
+      resultMap.put(new IdUrlPair(key, idUrlMap.get(key)), value);
+    }
+
     return resultMap;
   }
 
@@ -664,19 +761,33 @@ private static Map<String, List<JobResult>> groupJobs(List<JobResult> results, G
    */
   public static Result restSearch() {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String jobId = form.get(JOB_ID);
-    jobId = jobId != null ? jobId.trim() : "";
-    String flowUrl = form.get(FLOW_URL);
-    flowUrl = (flowUrl != null) ? flowUrl.trim() : null;
-    if (!jobId.isEmpty()) {
-      JobResult result = JobResult.find.byId(jobId);
+    String appId = form.get(APP_ID);
+    appId = appId != null ? appId.trim() : "";
+    if (appId.contains("job")) {
+      appId = appId.replaceAll("job", "application");
+    }
+    String flowExecId = form.get(FLOW_EXEC_ID);
+    flowExecId = (flowExecId != null) ? flowExecId.trim() : null;
+    if (!appId.isEmpty()) {
+      AppResult result = AppResult.find.select("*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+              + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+          .where()
+          .idEq(appId).findUnique();
       if (result != null) {
         return ok(Json.toJson(result));
       } else {
-        return notFound("Unable to find record on job id: " + jobId);
+        return notFound("Unable to find record on id: " + appId);
       }
-    } else if (flowUrl != null && !flowUrl.isEmpty()) {
-      List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.FLOW_EXEC_URL, flowUrl).findList();
+    } else if (flowExecId != null && !flowExecId.isEmpty()) {
+      List<AppResult> results = AppResult.find
+          .select("*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+              + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+          .where().eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId)
+          .findList();
       return ok(Json.toJson(results));
     }
 
@@ -688,23 +799,65 @@ public static Result restSearch() {
       }
     }
 
-    ExpressionList<JobResult> query = generateQuery();
-    List<JobResult> results =
-        query.order().desc(JobResult.TABLE.ANALYSIS_TIME).setFirstRow((page - 1) * REST_PAGE_LENGTH)
-            .setMaxRows(REST_PAGE_LENGTH).findList();
+    ExpressionList<AppResult> searchQuery = AppResult.find.select("*").where();
+    ExpressionList<AppResult> query = generateSearchQuery(searchQuery);
+    List<AppResult> results = query
+        .order().desc(AppResult.TABLE.FINISH_TIME)
+        .setFirstRow((page - 1) * REST_PAGE_LENGTH)
+        .setMaxRows(REST_PAGE_LENGTH)
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "." + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+        .findList();
     return ok(Json.toJson(results));
   }
 
   /**
    * The Rest API for Compare Feature
+   * E.g., localhost:8080/rest/compare?flow-exec-id1=abc&flow-exec-id2=xyz
    */
   public static Result restCompare() {
     DynamicForm form = Form.form().bindFromRequest(request());
-    String flowExecUrl1 = form.get(COMPARE_FLOW_URL1);
-    flowExecUrl1 = (flowExecUrl1 != null) ? flowExecUrl1.trim() : null;
-    String flowExecUrl2 = form.get(COMPARE_FLOW_URL2);
-    flowExecUrl2 = (flowExecUrl2 != null) ? flowExecUrl2.trim() : null;
-    return ok(Json.toJson(compareFlows(flowExecUrl1, flowExecUrl2)));
+    String flowExecId1 = form.get(COMPARE_FLOW_ID1);
+    flowExecId1 = (flowExecId1 != null) ? flowExecId1.trim() : null;
+    String flowExecId2 = form.get(COMPARE_FLOW_ID2);
+    flowExecId2 = (flowExecId2 != null) ? flowExecId2.trim() : null;
+
+    List<AppResult> results1 = null;
+    List<AppResult> results2 = null;
+    if (flowExecId1 != null && !flowExecId1.isEmpty() && flowExecId2 != null && !flowExecId2.isEmpty()) {
+      results1 = AppResult.find
+          .select("*").where()
+          .eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId1).setMaxRows(100)
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+              + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+          .findList();
+      results2 = AppResult.find
+          .select("*").where()
+          .eq(AppResult.TABLE.FLOW_EXEC_ID, flowExecId2).setMaxRows(100)
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+          .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+              + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+          .findList();
+    }
+
+    Map<IdUrlPair, Map<IdUrlPair, List<AppResult>>> compareResults = compareFlows(results1, results2);
+
+    // Flatten the (id, url) pair keys to plain ids for JSON serialization. The inner map is built
+    // once per job so that both flow executions are retained, rather than the last one overwriting
+    // the rest.
+    Map<String, Map<String, List<AppResult>>> resMap = new HashMap<String, Map<String, List<AppResult>>>();
+    for (Map.Entry<IdUrlPair, Map<IdUrlPair, List<AppResult>>> entry : compareResults.entrySet()) {
+      IdUrlPair jobExecPair = entry.getKey();
+      Map<IdUrlPair, List<AppResult>> value = entry.getValue();
+      Map<String, List<AppResult>> resultMap = new HashMap<String, List<AppResult>>();
+      for (Map.Entry<IdUrlPair, List<AppResult>> innerEntry : value.entrySet()) {
+        resultMap.put(innerEntry.getKey().getId(), innerEntry.getValue());
+      }
+      resMap.put(jobExecPair.getId(), resultMap);
+    }
+
+    return ok(Json.toJson(resMap));
   }
 
   /**
@@ -714,29 +867,33 @@ public static Result restCompare() {
    * {@code
    *   [
    *     {
-   *       "flowtime": <Last job's analysis_time>,
+   *       "flowtime": <Last job's finish time>,
    *       "score": 1000,
    *       "jobscores": [
    *         {
    *           "jobdefurl:" "url",
+   *           "jobexecurl:" "url",
    *           "jobscore": 500
    *         },
    *         {
    *           "jobdefurl:" "url",
+   *           "jobexecurl:" "url",
    *           "jobscore": 500
    *         }
    *       ]
    *     },
    *     {
-   *       "flowtime": <Last job's analysis_time>,
+   *       "flowtime": <Last job's finish time>,
    *       "score": 700,
    *       "jobscores": [
    *         {
    *           "jobdefurl:" "url",
+   *           "jobexecurl:" "url",
    *           "jobscore": 0
    *         },
    *         {
    *           "jobdefurl:" "url",
+   *           "jobexecurl:" "url",
    *           "jobscore": 700
    *         }
    *       ]
@@ -745,42 +902,51 @@ public static Result restCompare() {
    * }
    * </pre>
    */
-  public static Result restFlowGraphData(String flowUrl) {
+  public static Result restFlowGraphData(String flowDefId) {
     JsonArray datasets = new JsonArray();
-    if (flowUrl == null || flowUrl.isEmpty()) {
+    if (flowDefId == null || flowDefId.isEmpty()) {
       return ok(new Gson().toJson(datasets));
     }
 
     // Fetch available flow executions with latest JOB_HISTORY_LIMIT mr jobs.
-    List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.FLOW_URL, flowUrl).order()
-        .desc(JobResult.TABLE.ANALYSIS_TIME).setMaxRows(JOB_HISTORY_LIMIT).findList();
+    List<AppResult> results = AppResult.find
+        .select("*")
+        .where().eq(AppResult.TABLE.FLOW_DEF_ID, flowDefId)
+        .order().desc(AppResult.TABLE.FINISH_TIME)
+        .setMaxRows(JOB_HISTORY_LIMIT)
+        // The 2nd and 3rd tables are not required for plotting the graph
+        //.fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
+        //.fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS + "."
+        //    + AppHeuristicResult.TABLE.APP_HEURISTIC_RESULT_DETAILS, "*")
+        .findList();
     if (results.size() == 0) {
       logger.info("No results for Job url");
     }
-    Map<String, List<JobResult>> flowExecUrlToJobsMap =  limitHistoryResults(
-        groupJobs(results, GroupBy.FLOW_EXECUTION_URL), results.size(), MAX_HISTORY_LIMIT);
+    Map<IdUrlPair, List<AppResult>> flowExecIdToJobsMap =  limitHistoryResults(
+        groupJobs(results, GroupBy.FLOW_EXECUTION_ID), results.size(), MAX_HISTORY_LIMIT);
 
     // Compute the graph data starting from the earliest available execution to latest
-    List<String> keyList = new ArrayList<String>(flowExecUrlToJobsMap.keySet());
+    List<IdUrlPair> keyList = new ArrayList<IdUrlPair>(flowExecIdToJobsMap.keySet());
     for(int i = keyList.size() - 1; i >= 0; i--) {
-      String flowExecUrl = keyList.get(i);
+      IdUrlPair flowExecPair = keyList.get(i);
       int flowPerfScore = 0;
       JsonArray jobScores = new JsonArray();
-      List<JobResult> mrJobsList = Lists.reverse(flowExecUrlToJobsMap.get(flowExecUrl));
-      Map<String, List<JobResult>> jobDefUrlToJobsMap = groupJobs(mrJobsList, GroupBy.JOB_DEFINITION_URL);
+      List<AppResult> mrJobsList = Lists.reverse(flowExecIdToJobsMap.get(flowExecPair));
+      Map<IdUrlPair, List<AppResult>> jobDefIdToJobsMap = groupJobs(mrJobsList, GroupBy.JOB_DEFINITION_ID);
 
-      // Compute the execution records
-      for (String jobDefUrl : jobDefUrlToJobsMap.keySet()) {
+      // Compute the execution records. Note that each entry in the jobDefIdToJobsMap will have at least one AppResult
+      for (IdUrlPair jobDefPair : jobDefIdToJobsMap.keySet()) {
         // Compute job perf score
         int jobPerfScore = 0;
-        for (JobResult job : jobDefUrlToJobsMap.get(jobDefUrl)) {
-          jobPerfScore += getMRJobScore(job);
+        for (AppResult job : jobDefIdToJobsMap.get(jobDefPair)) {
+          jobPerfScore += job.score;
         }
 
         // A job in jobscores list
         JsonObject jobScore = new JsonObject();
         jobScore.addProperty("jobscore", jobPerfScore);
-        jobScore.addProperty("jobdefurl", jobDefUrl);
+        jobScore.addProperty("jobdefurl", jobDefPair.getUrl());
+        jobScore.addProperty("jobexecurl", jobDefIdToJobsMap.get(jobDefPair).get(0).jobExecUrl);
 
         jobScores.add(jobScore);
         flowPerfScore += jobPerfScore;
@@ -788,7 +954,7 @@ public static Result restFlowGraphData(String flowUrl) {
 
       // Execution record
       JsonObject dataset = new JsonObject();
-      dataset.addProperty("flowtime", mrJobsList.get(mrJobsList.size() - 1).analysisTime);
+      dataset.addProperty("flowtime", mrJobsList.get(mrJobsList.size() - 1).finishTime.getTime());
       dataset.addProperty("score", flowPerfScore);
       dataset.add("jobscores", jobScores);
 
@@ -807,7 +973,7 @@ public static Result restFlowGraphData(String flowUrl) {
    * {@code
    *   [
    *     {
-   *       "flowtime": <Last job's analysis_time>,
+   *       "flowtime": <Last job's finish time>,
    *       "score": 1000,
    *       "stagescores": [
    *         {
@@ -821,7 +987,7 @@ public static Result restFlowGraphData(String flowUrl) {
    *       ]
    *     },
    *     {
-   *       "flowtime": <Last job's analysis_time>,
+   *       "flowtime": <Last job's finish time>,
    *       "score": 700,
    *       "stagescores": [
    *         {
@@ -838,39 +1004,43 @@ public static Result restFlowGraphData(String flowUrl) {
    * }
    * </pre>
    */
-  public static Result restJobGraphData(String jobUrl) {
+  public static Result restJobGraphData(String jobDefId) {
     JsonArray datasets = new JsonArray();
-    if (jobUrl == null || jobUrl.isEmpty()) {
+    if (jobDefId == null || jobDefId.isEmpty()) {
       return ok(new Gson().toJson(datasets));
     }
 
     // Fetch available flow executions with latest JOB_HISTORY_LIMIT mr jobs.
-    List<JobResult> results = JobResult.find.where().eq(JobResult.TABLE.JOB_URL, jobUrl).order()
-        .desc(JobResult.TABLE.ANALYSIS_TIME).setMaxRows(JOB_HISTORY_LIMIT).findList();
+    List<AppResult> results = AppResult.find
+        .select(AppResult.getSearchFields() + "," + AppResult.TABLE.FLOW_EXEC_ID + "," + AppResult.TABLE.FLOW_EXEC_URL)
+        .where().eq(AppResult.TABLE.JOB_DEF_ID, jobDefId)
+        .order().desc(AppResult.TABLE.FINISH_TIME).setMaxRows(JOB_HISTORY_LIMIT)
+        .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, "*")
+        .findList();
     if (results.size() == 0) {
       logger.info("No results for Job url");
     }
-    Map<String, List<JobResult>> flowExecUrlToJobsMap =  limitHistoryResults(
-        groupJobs(results, GroupBy.FLOW_EXECUTION_URL), results.size(), MAX_HISTORY_LIMIT);
+    Map<IdUrlPair, List<AppResult>> flowExecIdToJobsMap =  limitHistoryResults(
+        groupJobs(results, GroupBy.FLOW_EXECUTION_ID), results.size(), MAX_HISTORY_LIMIT);
 
     // Compute the graph data starting from the earliest available execution to latest
-    List<String> keyList = new ArrayList<String>(flowExecUrlToJobsMap.keySet());
+    List<IdUrlPair> keyList = new ArrayList<IdUrlPair>(flowExecIdToJobsMap.keySet());
     for(int i = keyList.size() - 1; i >= 0; i--) {
-      String flowExecUrl = keyList.get(i);
+      IdUrlPair flowExecPair = keyList.get(i);
       int jobPerfScore = 0;
       JsonArray stageScores = new JsonArray();
-      List<JobResult> mrJobsList = Lists.reverse(flowExecUrlToJobsMap.get(flowExecUrl));
-      for (JobResult job : flowExecUrlToJobsMap.get(flowExecUrl)) {
+      List<AppResult> mrJobsList = Lists.reverse(flowExecIdToJobsMap.get(flowExecPair));
+      for (AppResult appResult : flowExecIdToJobsMap.get(flowExecPair)) {
 
-        // Each MR job triggered by jobUrl for flowExecUrl
+        // Each MR job triggered by jobDefId for flowExecId
         int mrPerfScore = 0;
-        for (JobHeuristicResult heuristicResult : job.heuristicResults) {
-          mrPerfScore += getHeuristicScore(heuristicResult);
+        for (AppHeuristicResult appHeuristicResult : appResult.yarnAppHeuristicResults) {
+          mrPerfScore += appHeuristicResult.score;
         }
 
         // A particular mr stage
         JsonObject stageScore = new JsonObject();
-        stageScore.addProperty("stageid", job.jobId);
+        stageScore.addProperty("stageid", appResult.id);
         stageScore.addProperty("stagescore", mrPerfScore);
 
         stageScores.add(stageScore);
@@ -879,7 +1049,7 @@ public static Result restJobGraphData(String jobUrl) {
 
       // Execution record
       JsonObject dataset = new JsonObject();
-      dataset.addProperty("flowtime", mrJobsList.get(mrJobsList.size() - 1).analysisTime);
+      dataset.addProperty("flowtime", mrJobsList.get(mrJobsList.size() - 1).finishTime.getTime());
       dataset.addProperty("score", jobPerfScore);
       dataset.add("stagescores", stageScores);
 
@@ -889,53 +1059,15 @@ public static Result restJobGraphData(String jobUrl) {
     return ok(new Gson().toJson(datasets));
   }
 
-  /**
-   * Calculates and returns the Heuristic Score for MapReduce Jobs.
-   *
-   * Heuristic Score = Number of Tasks(map/reduce) * Severity (When severity > 1)
-   *
-   * @param heuristicResult The Heuristic whose score has to be computed
-   * @return The Score
-   */
-  private static int getHeuristicScore(JobHeuristicResult heuristicResult) {
-    int heuristicScore = 0;
-
-    int severity = heuristicResult.severity.getValue();
-    if (severity != 0 && severity != 1) {
-      for (String[] dataArray : heuristicResult.getDataArray()) {
-        if (dataArray[0] != null && dataArray[0].toLowerCase().equals("number of tasks")) {
-          return severity * Integer.parseInt(dataArray[1]);
-        }
-      }
-    }
-
-    return heuristicScore;
-  }
-
-  /**
-   * Calculates and return the Mapreduce job score.
-   *
-   * Job Score = Sum of individual Heuristic Scores
-   *
-   * @param job The JobResult whose score has to be computed
-   * @return The Score
-   */
-  private static int getMRJobScore(JobResult job) {
-    int jobScore = 0;
-
-    for (JobHeuristicResult heuristicResult : job.heuristicResults) {
-      jobScore += getHeuristicScore(heuristicResult);
-    }
-
-    return jobScore;
-  }
-
   public static Result testEmail() {
 
     DynamicForm form = Form.form().bindFromRequest(request());
-    String jobId = form.get(JOB_ID);
-    if (jobId != null && !jobId.isEmpty()) {
-      JobResult result = JobResult.find.byId(jobId);
+    String appId = form.get(APP_ID);
+    if (appId != null && !appId.isEmpty()) {
+      // Normalize mapreduce job ids to application ids before the lookup
+      if (appId.contains("job")) {
+        appId = appId.replaceAll("job", "application");
+      }
+      AppResult result = AppResult.find.byId(appId);
       if (result != null) {
         return ok(emailcritical.render(result));
       }
diff --git a/app/controllers/IdUrlPair.java b/app/controllers/IdUrlPair.java
new file mode 100644
index 000000000..6d53d3c22
--- /dev/null
+++ b/app/controllers/IdUrlPair.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package controllers;
+
+
+public class IdUrlPair {
+
+  public final String id;
+  public final String url;
+
+  public IdUrlPair(String id, String url) {
+    this.id = id;
+    this.url = url;
+  }
+
+  public String getId() {
+    return id;
+  }
+
+  public String getUrl() {
+    return url;
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((id == null) ? 0 : id.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (this == obj) {
+      return true;
+    }
+    if (obj == null) {
+      return false;
+    }
+    if (getClass() != obj.getClass()) {
+      return false;
+    }
+    final IdUrlPair other = (IdUrlPair) obj;
+    if (id == null) {
+      if (other.getId() != null) {
+        return false;
+      }
+    } else if (!id.equals(other.getId())) {
+      return false;
+    }
+
+    return true;
+  }
+}
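
IdUrlPair's equals() and hashCode() deliberately key on the id alone, so two pairs that carry the same id but differently formatted urls resolve to the same map key. A small self-contained illustration (the ids and urls are made up):

    import java.util.HashMap;
    import java.util.Map;

    public class IdUrlPairDemo {
      public static void main(String[] args) {
        Map<IdUrlPair, Integer> scores = new HashMap<IdUrlPair, Integer>();
        scores.put(new IdUrlPair("exec-42", "http://scheduler/exec/42"), 10);
        // Same id, different url: equals()/hashCode() ignore the url,
        // so this hits the same key and overwrites the previous entry.
        scores.put(new IdUrlPair("exec-42", "http://mirror/exec/42"), 20);
        System.out.println(scores.size()); // prints 1
      }
    }
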
diff --git a/app/controllers/QueryHandler.java b/app/controllers/QueryHandler.java
deleted file mode 100644
index ddaaae30b..000000000
--- a/app/controllers/QueryHandler.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Copyright 2016 LinkedIn Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package controllers;
-
-import model.JobHeuristicResult;
-import model.JobResult;
-
-class QueryHandler {
-
-  static StringBuilder getJobResultQuery() {
-    final StringBuilder sqlQueryBuilder = new StringBuilder();
-    sqlQueryBuilder.append("SELECT " + JobResult.getColumnList());
-    sqlQueryBuilder.append(" FROM " + JobResult.TABLE.TABLE_NAME);
-    return sqlQueryBuilder;
-  }
-
-  static StringBuilder getJobResultQueryWithUsernameIndex() {
-    StringBuilder sqlQueryBuilder = getJobResultQuery();
-    setUseIndex(sqlQueryBuilder, JobResult.TABLE.USERNAME_INDEX);
-    return sqlQueryBuilder;
-  }
-
-  static StringBuilder getSqlJoinQuery() {
-    StringBuilder sqlQueryBuilder = getJobResultQuery();
-    sqlQueryBuilder.append(" JOIN " + JobHeuristicResult.TABLE.TABLE_NAME);
-    sqlQueryBuilder.append(" ON " + JobHeuristicResult.TABLE.TABLE_NAME + "." + JobHeuristicResult.TABLE.JOB_JOB_ID
-        + " = " + JobResult.TABLE.TABLE_NAME + "." + JobResult.TABLE.JOB_ID);
-    return sqlQueryBuilder;
-  }
-
-  static StringBuilder getSqlJoinQueryWithUsernameIndex() {
-    StringBuilder sqlQueryBuilder = getJobResultQuery();
-    sqlQueryBuilder = setUseIndex(sqlQueryBuilder, JobResult.TABLE.USERNAME_INDEX);
-    sqlQueryBuilder.append(" JOIN " + JobHeuristicResult.TABLE.TABLE_NAME);
-    sqlQueryBuilder.append(" ON " + JobHeuristicResult.TABLE.TABLE_NAME + "." + JobHeuristicResult.TABLE.JOB_JOB_ID
-        + " = " + JobResult.TABLE.TABLE_NAME + "." + JobResult.TABLE.JOB_ID);
-    return sqlQueryBuilder;
-  }
-
-  private static StringBuilder setUseIndex(StringBuilder sqlQueryBuilder, String index) {
-    return sqlQueryBuilder.append(" USE INDEX ( " + index + ")");
-  }
-
-}
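
With QueryHandler's hand-built SQL (including its USE INDEX hints) removed, lookups presumably go through Ebean's fluent API on the new models instead. A hedged sketch of what such a query can look like under Play 2.2's Ebean support, using the Finder declared on AppResult; this is not code from the patch, and the username value is illustrative:

    import java.util.List;
    import models.AppResult;

    public class RecentResultsSketch {
      // Sketch only: fluent replacement for the deleted raw-SQL builders.
      static List<AppResult> recentResultsFor(String username) {
        return AppResult.find
            .where()
            .eq(AppResult.TABLE.USERNAME, username)
            .orderBy(AppResult.TABLE.FINISH_TIME + " desc")
            .findList();
      }
    }
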
diff --git a/app/model/JobResult.java b/app/model/JobResult.java
deleted file mode 100644
index 40e5e35ae..000000000
--- a/app/model/JobResult.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2016 LinkedIn Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not
- * use this file except in compliance with the License. You may obtain a copy of
- * the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-
-package model;
-
-import com.fasterxml.jackson.annotation.JsonManagedReference;
-import com.linkedin.drelephant.analysis.Severity;
-
-import play.db.ebean.Model;
-
-import java.util.List;
-
-import javax.persistence.CascadeType;
-import javax.persistence.Column;
-import javax.persistence.Entity;
-import javax.persistence.Id;
-import javax.persistence.OneToMany;
-
-import org.apache.commons.lang.StringUtils;
-
-
-@Entity
-public class JobResult extends Model {
-
-  private static final long serialVersionUID = 1L;
-  public static final int URL_LEN_LIMIT = 2048;
-
-  public static class TABLE {
-    public static final String TABLE_NAME = "job_result";
-    public static final String JOB_ID = "job_id";
-    public static final String USERNAME = "username";
-    public static final String JOB_NAME = "job_name";
-    public static final String START_TIME = "start_time";
-    public static final String ANALYSIS_TIME = "analysis_time";
-    public static final String SEVERITY = "severity";
-    public static final String JOB_TYPE = "job_type";
-    public static final String URL = "url";
-    public static final String CLUSTER = "cluster";
-    public static final String JOB_EXEC_URL = "job_exec_url";
-    public static final String JOB_URL = "job_url";
-    public static final String FLOW_EXEC_URL = "flow_exec_url";
-    public static final String FLOW_URL = "flow_url";
-    public static final String[] TABLE_COLUMNS = {
-      "job_result.job_id",
-      "job_result.username",
-      "job_result.job_name",
-      "job_result.start_time",
-      "job_result.analysis_time",
-      "job_result.severity",
-      "job_result.job_type",
-      "job_result.url",
-      "job_result.cluster",
-      "job_result.job_exec_url",
-      "job_result.job_url",
-      "job_result.flow_exec_url",
-      "job_result.flow_url"
-    };
-    public static final String USERNAME_INDEX = "ix_job_result_username_1";
-  }
-
-  public static String getColumnList() {
-    return StringUtils.join(TABLE.TABLE_COLUMNS, ',');
-  }
-
-  @Id
-  @Column(length = 50)
-  public String jobId;
-
-  @Column(length = 50)
-  public String username;
-
-  @Column(length = 100)
-  public String jobName;
-
-  @Column
-  public long startTime;
-
-  @Column
-  public long analysisTime;
-
-  @Column
-  public Severity severity;
-
-  @Column
-  public String jobType;
-
-  @Column(length = 200)
-  public String url;
-
-  @Column(length = 100)
-  public String cluster;
-
-  @Column(length = URL_LEN_LIMIT)
-  public String jobExecUrl;
-
-  @Column(length = URL_LEN_LIMIT)
-  public String jobUrl;
-
-  @Column(length = URL_LEN_LIMIT)
-  public String flowExecUrl;
-
-  @Column(length = URL_LEN_LIMIT)
-  public String flowUrl;
-
-  @JsonManagedReference
-  @OneToMany(cascade = CascadeType.ALL, mappedBy = "job")
-  public List<JobHeuristicResult> heuristicResults;
-
-  public static Finder<String, JobResult> find = new Finder<String, JobResult>(String.class, JobResult.class);
-}
diff --git a/app/model/JobHeuristicResult.java b/app/models/AppHeuristicResult.java
similarity index 52%
rename from app/model/JobHeuristicResult.java
rename to app/models/AppHeuristicResult.java
index 64686cc3f..71bea5da8 100644
--- a/app/model/JobHeuristicResult.java
+++ b/app/models/AppHeuristicResult.java
@@ -14,8 +14,10 @@
  * the License.
  */
 
-package model;
+package models;
 
+import com.fasterxml.jackson.annotation.JsonManagedReference;
+import java.util.List;
 import javax.persistence.CascadeType;
 import javax.persistence.Column;
 import javax.persistence.Entity;
@@ -23,6 +25,8 @@
 import javax.persistence.Lob;
 import javax.persistence.ManyToOne;
 
+import javax.persistence.OneToMany;
+import javax.persistence.Table;
 import org.apache.commons.lang.StringUtils;
 
 import com.fasterxml.jackson.annotation.JsonBackReference;
@@ -34,30 +38,23 @@
 
 
 @Entity
-public class JobHeuristicResult extends Model {
+@Table(name = "yarn_app_heuristic_result")
+public class AppHeuristicResult extends Model {
 
-  private static final long serialVersionUID = 123L;
+  private static final long serialVersionUID = 2L;
 
   public static class TABLE {
-    public static final String TABLE_NAME = "job_heuristic_result";
+    public static final String TABLE_NAME = "yarn_app_heuristic_result";
     public static final String ID = "id";
-    public static final String JOB_JOB_ID = "job_job_id";
+    public static final String APP_RESULT_ID = "yarnAppResult";
+    public static final String HEURISTIC_NAME = "heuristicName";
     public static final String SEVERITY = "severity";
-    public static final String ANALYSIS_NAME = "analysis_name";
-    public static final String DATA_COLUMNS = "data_columns";
-    public static final String DATA = "data";
-    public static final String TABLE_COLUMNS[] = {
-      "job_heuristic_result.id",
-      "job_heuristic_result.job_job_id",
-      "job_heuristic_result.severity",
-      "job_heuristic_result.analysis_name",
-      "job_heuristic_result.data_columns",
-      "job_heuristic_result.data"
-    };
+    public static final String SCORE = "score";
+    public static final String APP_HEURISTIC_RESULT_DETAILS = "yarnAppHeuristicResultDetails";
   }
 
-  public static String getColumnList() {
-    return StringUtils.join(TABLE.TABLE_COLUMNS, ',');
+  public static String getSearchFields() {
+    return Utils.commaSeparated(AppHeuristicResult.TABLE.HEURISTIC_NAME, AppHeuristicResult.TABLE.SEVERITY);
   }
 
   @JsonIgnore
@@ -66,23 +63,22 @@ public static String getColumnList() {
 
   @JsonBackReference
   @ManyToOne(cascade = CascadeType.ALL)
-  public JobResult job;
+  public AppResult yarnAppResult;
 
-  @Column
-  public Severity severity;
+  @Column(nullable = false)
+  public String heuristicClass;
 
-  @Column
-  public String analysisName;
+  @Column(nullable = false)
+  public String heuristicName;
 
-  @JsonIgnore
-  @Lob
-  public String data;
+  @Column(nullable = false)
+  public Severity severity;
 
-  @JsonIgnore
-  @Column
-  public int dataColumns;
+  @Column(nullable = false)
+  public int score;
 
-  public String[][] getDataArray() {
-    return Utils.parseCsvLines(data);
-  }
-}
+  @JsonManagedReference
+  @OneToMany(cascade = CascadeType.ALL, mappedBy = "yarnAppHeuristicResult")
+  public List<AppHeuristicResultDetails> yarnAppHeuristicResultDetails;
+
+}
\ No newline at end of file
diff --git a/app/models/AppHeuristicResultDetails.java b/app/models/AppHeuristicResultDetails.java
new file mode 100644
index 000000000..0975fc6bb
--- /dev/null
+++ b/app/models/AppHeuristicResultDetails.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package models;
+
+import javax.persistence.CascadeType;
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.ManyToOne;
+import javax.persistence.Table;
+
+import com.fasterxml.jackson.annotation.JsonBackReference;
+
+import play.db.ebean.Model;
+
+
+@Entity
+@Table(name = "yarn_app_heuristic_result_details")
+public class AppHeuristicResultDetails extends Model {
+
+  private static final long serialVersionUID = 3L;
+
+  public static class TABLE {
+    public static final String TABLE_NAME = "yarn_app_heuristic_result_details";
+    public static final String APP_HEURISTIC_RESULT_ID = "yarnAppHeuristicResult";
+    public static final String NAME = "name";
+    public static final String VALUE = "value";
+    public static final String DETAILS = "details";
+  }
+
+  @JsonBackReference
+  @ManyToOne(cascade = CascadeType.ALL)
+  public AppHeuristicResult yarnAppHeuristicResult;
+
+  @Column(length=128, nullable = false)
+  public String name;
+
+  @Column(length=255, nullable = false)
+  public String value;
+
+  @Column(nullable = true)
+  public String details;
+
+}
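
Together, AppHeuristicResult and AppHeuristicResultDetails replace the old CSV blob (the `data` column plus `getDataArray()`) with a real one-to-many, so consumers walk an object graph instead of parsing delimited lines. A minimal traversal sketch, assuming an already-fetched AppResult:

    import models.AppHeuristicResult;
    import models.AppHeuristicResultDetails;
    import models.AppResult;

    public class DetailsTraversalSketch {
      // Walk result -> heuristics -> details; no CSV parsing involved anymore.
      static void printDetails(AppResult appResult) {
        for (AppHeuristicResult heuristic : appResult.yarnAppHeuristicResults) {
          System.out.println(heuristic.heuristicName + " [" + heuristic.severity.getText() + "]");
          for (AppHeuristicResultDetails detail : heuristic.yarnAppHeuristicResultDetails) {
            System.out.println("  " + detail.name + " = " + detail.value);
          }
        }
      }
    }
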
diff --git a/app/models/AppResult.java b/app/models/AppResult.java
new file mode 100644
index 000000000..368a616ec
--- /dev/null
+++ b/app/models/AppResult.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package models;
+
+import com.fasterxml.jackson.annotation.JsonManagedReference;
+import com.linkedin.drelephant.analysis.Severity;
+
+import com.linkedin.drelephant.util.Utils;
+import java.util.Date;
+import play.db.ebean.Model;
+
+import java.util.List;
+
+import javax.persistence.CascadeType;
+import javax.persistence.Column;
+import javax.persistence.Entity;
+import javax.persistence.Id;
+import javax.persistence.OneToMany;
+import javax.persistence.Table;
+
+
+@Entity
+@Table(name = "yarn_app_result")
+public class AppResult extends Model {
+
+  private static final long serialVersionUID = 1L;
+  public static final int URL_LEN_LIMIT = 800;
+
+  // Note that the Table column constants are actually the java variable names defined in this model.
+  // This is because ebean operations require the model variable names to be passed as strings.
+  public static class TABLE {
+    public static final String TABLE_NAME = "yarn_app_result";
+    public static final String ID = "id";
+    public static final String NAME = "name";
+    public static final String USERNAME = "username";
+    public static final String START_TIME = "startTime";
+    public static final String FINISH_TIME = "finishTime";
+    public static final String TRACKING_URL = "trackingUrl";
+    public static final String JOB_TYPE = "jobType";
+    public static final String SEVERITY = "severity";
+    public static final String SCORE = "score";
+    public static final String WORKFLOW_DEPTH = "workflowDepth";
+    public static final String SCHEDULER = "scheduler";
+    public static final String JOB_NAME = "jobName";
+    public static final String JOB_EXEC_ID = "jobExecId";
+    public static final String FLOW_EXEC_ID = "flowExecId";
+    public static final String JOB_DEF_ID = "jobDefId";
+    public static final String FLOW_DEF_ID = "flowDefId";
+    public static final String JOB_EXEC_URL = "jobExecUrl";
+    public static final String FLOW_EXEC_URL = "flowExecUrl";
+    public static final String JOB_DEF_URL = "jobDefUrl";
+    public static final String FLOW_DEF_URL = "flowDefUrl";
+    public static final String APP_HEURISTIC_RESULTS = "yarnAppHeuristicResults";
+  }
+
+  public static String getSearchFields() {
+    return Utils.commaSeparated(AppResult.TABLE.NAME, AppResult.TABLE.USERNAME, AppResult.TABLE.JOB_TYPE,
+        AppResult.TABLE.SEVERITY, AppResult.TABLE.FINISH_TIME);
+  }
+
+  @Id
+  @Column(length = 50, unique = true, nullable = false)
+  public String id;
+
+  @Column(length = 100, nullable = false)
+  public String name;
+
+  @Column(length = 50, nullable = false)
+  public String username;
+
+  @Column(length = 50, nullable = false)
+  public String queueName;
+
+  @Column(nullable = false)
+  public Date startTime;
+
+  @Column(nullable = false)
+  public Date finishTime;
+
+  @Column(length = 255, nullable = false)
+  public String trackingUrl;
+
+  @Column(length = 20, nullable = false)
+  public String jobType;
+
+  @Column(nullable = false)
+  public Severity severity;
+
+  @Column(nullable = false)
+  public int score;
+
+  @Column(nullable = false)
+  public int workflowDepth;
+
+  @Column(length = 20, nullable = true)
+  public String scheduler;
+
+  @Column(length = 255, nullable = false)
+  public String jobName;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String jobExecId;
+
+  @Column(length = 255, nullable = false)
+  public String flowExecId;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String jobDefId;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String flowDefId;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String jobExecUrl;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String flowExecUrl;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String jobDefUrl;
+
+  @Column(length = URL_LEN_LIMIT, nullable = false)
+  public String flowDefUrl;
+
+  @JsonManagedReference
+  @OneToMany(cascade = CascadeType.ALL, mappedBy = "yarnAppResult")
+  public List<AppHeuristicResult> yarnAppHeuristicResults;
+
+  public static Finder<String, AppResult> find = new Finder<String, AppResult>(String.class, AppResult.class);
+}
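
The comment above the TABLE class explains the pattern: Ebean takes model field names as strings, so getSearchFields() can drive a partial-object fetch that pulls only the columns a page renders. A hedged example of how a search query might use these constants (the severity filter is illustrative, not from this patch):

    import java.util.List;
    import com.linkedin.drelephant.analysis.Severity;
    import models.AppHeuristicResult;
    import models.AppResult;

    public class SearchQuerySketch {
      // Sketch: partial-object fetch driven by the string field names above.
      static List<AppResult> criticalResults() {
        return AppResult.find
            .select(AppResult.getSearchFields())
            .fetch(AppResult.TABLE.APP_HEURISTIC_RESULTS, AppHeuristicResult.getSearchFields())
            .where()
            .eq(AppResult.TABLE.SEVERITY, Severity.CRITICAL.getValue())
            .findList();
      }
    }
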
diff --git a/app/views/emailcritical.scala.html b/app/views/emailcritical.scala.html
index cb384e2d4..45aa0f1bb 100644
--- a/app/views/emailcritical.scala.html
+++ b/app/views/emailcritical.scala.html
@@ -14,7 +14,7 @@
 * the License.
 *@
 
-@(result: model.JobResult)
+@(result: models.AppResult)
 
 <html>
   <head></head>
@@ -24,23 +24,23 @@ <h3>Dr. Elephant needs your help!</h3>
 
       <p>Your Hadoop job is endangering the elephants living in our cluster.</p>
       <p>
-        Please take a look <a href='http://eat1-magicaz01.grid.linkedin.com:8080/search?id=@result.jobId'><b>here</b></a> to figure out
+        Please take a look <a href='http://eat1-magicaz01.grid.linkedin.com:8080/search?id=@result.id'><b>here</b></a> to figure out
         what chemicals is causing the elephants to go nuts.
       </p>
       <p>
         Here are the aspects of the job that we need to look at: <br>
         <ul>
-          @for(heuristicResult <- result.heuristicResults) {
-            @if(heuristicResult.severity == com.linkedin.drelephant.analysis.Severity.CRITICAL) {
-              <li>@heuristicResult.analysisName</li>
+          @for(appHeuristicResult <- result.yarnAppHeuristicResults) {
+            @if(appHeuristicResult.severity == com.linkedin.drelephant.analysis.Severity.CRITICAL) {
+              <li>@appHeuristicResult.heuristicName</li>
             }
           }
         </ul>
       </p>
       <p style="font-size: 0.7em">
-        [<a href='http://eat1-magicaz01.grid.linkedin.com:8080/search?id=@result.jobId'>Dr. Elephant</a>]
-        [<a href='@result.url'>Jobtracker</a>]
-        <br>@result.jobId<br>@result.jobName
+        [<a href='http://eat1-magicaz01.grid.linkedin.com:8080/search?id=@result.id'>Dr. Elephant</a>]
+        [<a href='@result.trackingUrl'>Jobtracker</a>]
+        <br>@result.id<br>@result.name
       </p>
       <p>
         Thanks!
diff --git a/app/views/page/comparePage.scala.html b/app/views/page/comparePage.scala.html
index 88a7bffd0..37fe22dd8 100644
--- a/app/views/page/comparePage.scala.html
+++ b/app/views/page/comparePage.scala.html
@@ -30,13 +30,13 @@
     @tags.panel(){ Compare } {
       <form id="compare-form" role="form" method="get" action="@routes.Application.compare()">
         <div class="form-group">
-          <label for="form-flow-url-1">Flow Execution URL 1</label>
-          <input type="text" class="form-control" id="form-flow-url-1" name="flow-exec-url1" placeholder="Flow Exec URL 1">
+          <label for="form-flow-id-1">Flow Execution URL/ID 1</label>
+          <input type="text" class="form-control" id="form-flow-id-1" name="flow-exec-id1" placeholder="Flow Exec URL/ID 1">
         </div>
 
         <div class="form-group">
-          <label for="form-flow-url-2">Flow Execution URL 2</label>
-          <input type="text" class="form-control" id="form-flow-url-2" name="flow-exec-url2" placeholder="Flow Exec URL 2">
+          <label for="form-flow-id-2">Flow Execution URL/ID 2</label>
+          <input type="text" class="form-control" id="form-flow-id-2" name="flow-exec-id2" placeholder="Flow Exec URL/ID 2">
         </div>
 
         <button type="submit" class="btn btn-default">Compare</button>
diff --git a/app/views/page/flowHistoryPage.scala.html b/app/views/page/flowHistoryPage.scala.html
index 36f7b56c0..74dd9cb0b 100644
--- a/app/views/page/flowHistoryPage.scala.html
+++ b/app/views/page/flowHistoryPage.scala.html
@@ -23,8 +23,8 @@
     @tags.panel(){ Flow History } {
       <form id="flow-history-form" role="form" method="get" action="@routes.Application.flowHistory()">
         <div class="form-group">
-          <label for="form-flow-url">Flow Definition URL</label>
-          <input type="text" class="form-control" id="form-flow-url" name="flow-url" placeholder="Flow Definition URL">
+          <label for="form-flow-def-idurl">Flow Definition URL/ID</label>
+          <input type="text" class="form-control" id="form-flow-def-id" name="flow-def-id" placeholder="Flow Definition URL/ID">
         </div>
 
         <button type="submit" class="btn btn-default">Search</button>
diff --git a/app/views/page/helpPage.scala.html b/app/views/page/helpPage.scala.html
index 5f45bb3e5..131a0d4e7 100644
--- a/app/views/page/helpPage.scala.html
+++ b/app/views/page/helpPage.scala.html
@@ -26,7 +26,7 @@
                 }
                 <br/>
             }
-            <a href="@routes.Application.help()?topic=@helper.urlEncode(com.linkedin.drelephant.analysis.HeuristicResult.NO_DATA.getAnalysis)">@com.linkedin.drelephant.analysis.HeuristicResult.NO_DATA.getAnalysis</a><br>
+            <a href="@routes.Application.help()?topic=@helper.urlEncode(com.linkedin.drelephant.analysis.HeuristicResult.NO_DATA.getHeuristicName)">@com.linkedin.drelephant.analysis.HeuristicResult.NO_DATA.getHeuristicName</a><br>
         }
     }
     @tags.column(9) {
diff --git a/app/views/page/homePage.scala.html b/app/views/page/homePage.scala.html
index d3b5f8e90..431f425d0 100644
--- a/app/views/page/homePage.scala.html
+++ b/app/views/page/homePage.scala.html
@@ -27,8 +27,8 @@
 
 @main("Dr. Elephant", "dashboard") {
   <div class="jumbotron">
-    <h2>Dr. Elephant has been busy!</h2>
-    <p>We looked through <b>@numJobsAnalyzed</b> jobs today.<br>
+    <h2>Hello there, I've been busy!</h2>
+    <p>I looked through <b>@numJobsAnalyzed</b> jobs today.<br>
       About <b>@numJobsSevere</b> of them could use some tuning.<br>
       About <b>@numJobsCritical</b> of them need some serious attention!
     </p>
diff --git a/app/views/page/jobHistoryPage.scala.html b/app/views/page/jobHistoryPage.scala.html
index cd53a5226..aa74d5dec 100644
--- a/app/views/page/jobHistoryPage.scala.html
+++ b/app/views/page/jobHistoryPage.scala.html
@@ -23,8 +23,8 @@
     @tags.panel(){ Job History } {
       <form id="job-history-form" role="form" method="get" action="@routes.Application.jobHistory()">
         <div class="form-group">
-          <label for="form-job-url">Job Definition URL</label>
-          <input type="text" class="form-control" id="form-job-url" name="job-url" placeholder="Job Def URL">
+          <label for="form-job-def-id">Job Definition URL/ID</label>
+          <input type="text" class="form-control" id="form-job-def-id" name="job-def-id" placeholder="Job Def URL/ID">
         </div>
 
         <button type="submit" class="btn btn-default">Search</button>
diff --git a/app/views/page/searchPage.scala.html b/app/views/page/searchPage.scala.html
index 6a77f108a..f8dbfb296 100644
--- a/app/views/page/searchPage.scala.html
+++ b/app/views/page/searchPage.scala.html
@@ -31,16 +31,16 @@
       <form id="search-form" role="form" method="get" action="@routes.Application.search()">
         <!--Job Id, FlowExec Url and the Username test input type-->
         <div class="form-group">
-          <label for="form-job-id">Job ID</label>
+          <label for="form-job-id">Job/App ID</label>
           <input type="text" class="form-control" id="form-job-id" name="id" placeholder="Job ID">
         </div>
         <div class="form-group">
-          <label for="form-flow-url">Flow Execution URL</label>
-          <input type="text" class="form-control" id="form-flow-url" name="flow-url" placeholder="Flow Exec URL">
+          <label for="form-flow-exec-id">Flow Execution URL/ID</label>
+          <input type="text" class="form-control" id="form-flow-exec-id" name="flow-exec-id" placeholder="Flow Exec URL/ID">
         </div>
         <div class="form-group">
-          <label for="form-user">User</label>
-          <input type="text" class="form-control" id="form-user" name="user" placeholder="User">
+          <label for="form-username">User</label>
+          <input type="text" class="form-control" id="form-username" name="username" placeholder="User">
         </div>
 
         <!--Job Type filter-->
@@ -49,10 +49,10 @@
         </div>
         <div class="form-group" style="padding-left:10px;">
           <select class="form-control" id="form-job-type" name="job-type">
-            @for((appType, jobTypeList) <- com.linkedin.drelephant.ElephantContext.instance().getAppTypeToJobTypes()) {
-              <optgroup label="@appType.getName()">
+            @for((appType, jobTypeList) <- com.linkedin.drelephant.ElephantContext.instance().getAppTypeToJobTypes) {
+              <optgroup label="@appType.getName">
               @for(jobType <- jobTypeList) {
-                <option value="@jobType.getName()">@jobType.getName()</option>
+                <option value="@jobType.getName">@jobType.getName</option>
               }
               </optgroup>
             }
@@ -66,14 +66,14 @@
         <div class="form-group" style="padding-left:10px;">
           <select class="form-control" id="form-severity" name="severity">
             @for(severity <- com.linkedin.drelephant.analysis.Severity.values()) {
-              <option value="@severity.getValue()">@severity.getText()</option>
+              <option value="@severity.getValue">@severity.getText</option>
             }
             </select>
         </div>
         <div class="form-group" style="padding-left:10px;">
           <select class="form-control" id="form-analysis" name="analysis">
             <option value="">All Analysis</option>
-            @for((appType, heuristicList) <- com.linkedin.drelephant.ElephantContext.instance().getAllHeuristicNames()) {
+            @for((appType, heuristicList) <- com.linkedin.drelephant.ElephantContext.instance().getAllHeuristicNames) {
               <optgroup label="@appType">
               @for(heuristic <- heuristicList) {
                 <option value="@heuristic">@heuristic</option>
@@ -88,12 +88,12 @@
           <label><input type="checkbox" id="form-datetime-enable" name="datetime-enable" value=""> Job Finish Date</label>
         </div>
         <div class="form-group" style="padding-left:10px;">
-          <input type="text" class="form-control" id="form-finished-time-begin-date" name="finished-time-begin-date" placeholder="From">
+          <input type="text" class="form-control" id="form-finished-time-begin-date" name="finished-time-begin-date" placeholder="From: mm/dd/yyyy">
 
           <input type="hidden" id="form-finished-time-begin" name="finished-time-begin" value="" />
         </div>
         <div class="form-group" style="padding-left:10px;">
-          <input type="text" class="form-control" id="form-finished-time-end-date" name="finished-time-end-date" placeholder="To">
+          <input type="text" class="form-control" id="form-finished-time-end-date" name="finished-time-end-date" placeholder="To: mm/dd/yyyy">
           <input type="hidden" id="form-finished-time-end" name="finished-time-end" value="" />
         </div>
 
diff --git a/app/views/results/compareResults.scala.html b/app/views/results/compareResults.scala.html
index 7b529aa73..615ec2cd6 100644
--- a/app/views/results/compareResults.scala.html
+++ b/app/views/results/compareResults.scala.html
@@ -14,7 +14,7 @@
 * the License.
 *@
 
-@(results: java.util.Map[String, java.util.Map[String, java.util.List[model.JobResult]]])
+@(results: java.util.Map[IdUrlPair, java.util.Map[IdUrlPair, java.util.List[models.AppResult]]])
 
 @*
 * The layout of the comparison results.
@@ -26,17 +26,17 @@
   <div>
     <ul class="list-group">
     @if(results != null){
-      @for( (jobdefurl, flowexecmap) <- results) {
+      @for( (jobDefPair, flowexecmap) <- results) {
         <div class="list-group-item-alternate">
-          <p><strong>Job Definition URL: </strong><a href=@jobdefurl>@jobdefurl</a></p>
+          <p><strong>Job Definition URL: </strong><a href=@jobDefPair.getUrl>@jobDefPair.getUrl</a></p>
           <div class="list-group well-lg">
           <!-- flowexecmap has two entries corresponding to the two flow execution urls and cannot be null. -->
-          @for((flowexecurl, jobs) <- flowexecmap) {
-            <div class="list-group">Flow Execution URL: <a href=@flowexecurl>@flowexecurl</a>
+          @for((flowExecPair, jobs) <- flowexecmap) {
+            <div class="list-group">Flow Execution URL: <a href=@flowExecPair.getUrl>@flowExecPair.getUrl</a>
               @if(jobs != null) {
                 @for(result <- jobs) {
                   <a class="list-group-item list-group-item-@result.severity.getBootstrapColor"
-                  href="@routes.Application.search()?id=@result.jobId">
+                  href="@routes.Application.search()?id=@result.id">
                   @tags.jobSummary(result)
                   </a>
                 }
diff --git a/app/views/results/flowDetails.scala.html b/app/views/results/flowDetails.scala.html
index 7668b72fa..c4f7a3ee5 100644
--- a/app/views/results/flowDetails.scala.html
+++ b/app/views/results/flowDetails.scala.html
@@ -14,7 +14,7 @@
 * the License.
 *@
 
-@(execurl: String, results: java.util.Map[String, java.util.List[model.JobResult]])
+@(execurl: String, results: java.util.Map[IdUrlPair, java.util.List[models.AppResult]])
 
 @*
 * Displays all the mr jobs belonging to a flow grouped by job exec url
@@ -32,13 +32,13 @@ <h3 class="panel-title">
   </div>
 
   <ul class="list-group">
-    @for( (url, jobs) <- results) {
+    @for( (jobExecPair, jobs) <- results) {
       <div class="list-group-item">
-        Job Execution URL: <a href=@url>@url</a>
+        Job Execution URL: <a href=@jobExecPair.getUrl>@jobExecPair.getUrl</a>
         <div class="list-group well-lg">
           @for(result <- jobs) {
             <a class="list-group-item list-group-item-@result.severity.getBootstrapColor"
-               href="@routes.Application.search()?id=@result.jobId">
+               href="@routes.Application.search()?id=@result.id">
               @tags.jobSummary(result)
             </a>
           }
diff --git a/app/views/results/flowHistoryResults.scala.html b/app/views/results/flowHistoryResults.scala.html
index 41e57bdaf..d5906fc13 100644
--- a/app/views/results/flowHistoryResults.scala.html
+++ b/app/views/results/flowHistoryResults.scala.html
@@ -14,8 +14,8 @@
 * the License.
 *@
 
-@(flowUrl: String, results: java.util.Map[String, java.util.Map[String, java.util.List[model.JobResult]]],
-    jobList: java.util.List[String], flowExecTimeList: java.util.List[Long])
+@(flowDefId: String, results: java.util.Map[IdUrlPair, java.util.Map[IdUrlPair, java.util.List[models.AppResult]]],
+    idPairToJobNameMap: java.util.Map[IdUrlPair, String], flowExecTimeList: java.util.List[Long])
 
 @import com.linkedin.drelephant.analysis.Severity
 @import scala.Predef; var jobDefIndex = 0
@@ -35,7 +35,7 @@
 }
 
 @if(results != null && results.nonEmpty) {
-  @tags.panel(){ Flow History Results: <a href="@flowUrl" style="font-size:14px; color:#083d8d">@flowUrl</a>} {
+  @tags.panel(){ Flow History Results: <a href="@flowDefId" style="font-size:14px; color:#083d8d">@flowDefId</a>} {
 
     <script src="@routes.Assets.at("js/flowhistoryform.js")" type="text/javascript"></script>
     <script src="@routes.Assets.at("js/graphutility.js")" type="text/javascript"></script>
@@ -57,37 +57,38 @@
           <thead>
             <tr>
               <th style="width:200px">Flow Executions</th>
-              @for(jobDefUrl <- jobList) {
+              @for((jobDefPair, jobName) <- idPairToJobNameMap) {
                 <th>
-                  <a href='/jobhistory?job-url=@helper.urlEncode(jobDefUrl)' data-toggle='tooltip'
-                  title='@jobDefUrl'>Job @{jobDefIndex = jobDefIndex + 1; jobDefIndex}
-              </a>
+                  <a href='/jobhistory?job-def-id=@helper.urlEncode(jobDefPair.getUrl)' data-toggle='tooltip'
+                  title='@jobDefPair.getUrl'>Job @{jobDefIndex = jobDefIndex + 1; jobDefIndex}<br>
+                  @if(jobName.length > 45) { @jobName.substring(0, 41)... } else { @jobName }
+                  </a>
                 </th>
               }
             </tr>
           </thead>
 
           <tbody>
-          @for((flowExecUrl, jobMap) <- results) {
+          @for((flowExecPair, jobMap) <- results) {
             <tr>
 
               <!-- The First column, execution id -->
               <td style="text-align:center">
-                <a class="exectime" href='@flowExecUrl' data-toggle='tooltip' title='@flowExecUrl'>Loading...</a>
+                <a class="exectime" href='@flowExecPair.getUrl' data-toggle='tooltip' title='@flowExecPair.getUrl'>Loading...</a>
               </td>
 
               <!-- The remaining columns -->
-              @for(jobDefUrl <- jobList) {
+              @for((jobDefPair, jobName) <- idPairToJobNameMap) {
                 <td>
-                @if(jobMap.get(jobDefUrl) != null) {
-                  @for((result,jobCount) <- jobMap.get(jobDefUrl).zipWithIndex) {
-                    <a class="hasTooltip" href='@routes.Application.search()?id=@result.jobId'
+                @if(jobMap.get(jobDefPair) != null) {
+                  @for((result,jobCount) <- jobMap.get(jobDefPair).zipWithIndex) {
+                    <a class="hasTooltip" href='@routes.Application.search()?id=@result.id'
                     style='color: @getSeverityColor(result.severity);'>&#9673;
                       <!-- Table Tooltip Content -->
                       <div id="tooltip-div">
-                        <p><b>@result.jobId</b></p>
-                        @for(heuristicResult <- result.heuristicResults) {
-                          <p style="color: @getSeverityColor(heuristicResult.severity);">@heuristicResult.analysisName</p>
+                        <p><b>@result.id</b></p>
+                        @for(yarnAppHeuristicResult <- result.yarnAppHeuristicResults) {
+                          <p style="color: @getSeverityColor(yarnAppHeuristicResult.severity);">@yarnAppHeuristicResult.heuristicName</p>
                         }
                       </div>
                     </a>
diff --git a/app/views/results/jobDetails.scala.html b/app/views/results/jobDetails.scala.html
index 9e509001c..43b9e60be 100644
--- a/app/views/results/jobDetails.scala.html
+++ b/app/views/results/jobDetails.scala.html
@@ -14,12 +14,12 @@
 * the License.
 *@
 
-@(result: model.JobResult)
+@(result: models.AppResult)
 
 @*
 * Displays complete info of the job.
 *
-* @param The job result of type JobResult
+* @param The job result of type AppResult
 *@
 
 <div class="panel panel-default">
@@ -33,48 +33,49 @@
             <tbody>
               <tr>
                 <td>Jobtracker:</td>
-                <td><a href="@result.url">@result.url</a></td>
+                <td><a href="@result.trackingUrl">@result.trackingUrl</a></td>
               </tr>
-              @if(result.jobExecUrl != null){
+              @if(!result.jobExecUrl.isEmpty){
                 <tr>
                   <td>Job execution:</td>
                   <td><a href="@result.jobExecUrl">@result.jobExecUrl</a></td>
                 </tr>
               }
-              @if(result.jobUrl != null){
+              @if(!result.jobDefUrl.isEmpty){
                 <tr>
                   <td>Job definition:</td>
-                  <td><a href="@result.jobUrl">@result.jobUrl</a></td>
+                  <td><a href="@result.jobDefUrl">@result.jobDefUrl</a></td>
                 </tr>
               }
-              @if(result.flowExecUrl != null){
+              @if(!result.flowExecUrl.isEmpty){
                 <tr>
                   <td>Flow execution:</td>
                   <td><a href="@result.flowExecUrl">@result.flowExecUrl</a></td>
                 </tr>
               }
-              @if(result.flowUrl != null){
+              @if(!result.flowDefUrl.isEmpty){
                 <tr>
                   <td>Flow definition:</td>
-                  <td><a href="@result.flowUrl">@result.flowUrl</a></td>
+                  <td><a href="@result.flowDefUrl">@result.flowDefUrl</a></td>
+                </tr>
+              }
+              @if(!result.jobDefId.isEmpty){
+                <tr>
+                  <td><a href="/jobhistory?job-def-id=@helper.urlEncode(result.jobDefId)">Job History</a></td>
+                </tr>
+              }
+              @if(!result.flowDefId.isEmpty){
+                <tr>
+                  <td><a href="/flowhistory?flow-def-id=@helper.urlEncode(result.flowDefId)">Flow&nbsp;History</a></td>
                 </tr>
               }
             </tbody>
           </table>
-          @if(result.flowExecUrl != null){
-            <div>
-              <a href="/flowrelated?flow-exec-url=@helper.urlEncode(result.flowExecUrl)">Other jobs from this flow</a>
-            </div>
-          }
-          @if(result.jobUrl != null){
-            <div><a href="/jobhistory?job-url=@helper.urlEncode(result.jobUrl)">Other executions of this job</a></div>
-          }
         </div>
-
-        <div>&nbsp;</div>
+        <br>
         <div>
-          @for(heuristicResult <- result.heuristicResults) {
-            <a href="#@heuristicResult.analysisName.replace(" ", "")" class="label label-@heuristicResult.severity.getBootstrapColor()">@heuristicResult.analysisName</a>
+          @for(yarnAppHeuristicResult <- result.yarnAppHeuristicResults) {
+            <a href="#@yarnAppHeuristicResult.heuristicName.replace(" ", "")" class="label label-@yarnAppHeuristicResult.severity.getBootstrapColor">@yarnAppHeuristicResult.heuristicName</a>
           }
         </div>
       </p>
@@ -82,28 +83,30 @@
 
     <!--A detailed heuristic info-->
     <div class="list-group">
-      @for(heuristicResult <- result.heuristicResults) {
-        <a name="@heuristicResult.analysisName.replace(" ", "")" class="list-group-item list-group-item-@heuristicResult.severity.getBootstrapColor()">
-          <h4 class="list-group-item-heading">@heuristicResult.analysisName</h4>
-          @defining(heuristicResult.getDataArray()) { data =>
-            <table class="list-group-item-text table table-condensed left-table">
-              <thead><tr>
-                <th colspan="@heuristicResult.dataColumns">
-                  Severity: @heuristicResult.severity.getText()
-                  @if(heuristicResult.severity.getValue() > 1){ <a href="@routes.Application.help()?topic=@helper.urlEncode(heuristicResult.analysisName)">[Explain]</a> }
-                </th>
-              </tr></thead>
-              <tbody>
-                @for(line <- data) {
+      @for(yarnAppHeuristicResult <- result.yarnAppHeuristicResults) {
+        <a name="@yarnAppHeuristicResult.heuristicName.replace(" ", "")" class="list-group-item list-group-item-@yarnAppHeuristicResult.severity.getBootstrapColor">
+          <h4 class="list-group-item-heading">@yarnAppHeuristicResult.heuristicName</h4>
+          <table class="list-group-item-text table table-condensed left-table" style="table-layout:fixed;">
+            <thead><tr>
+              <th colspan="2">
+                Severity: @yarnAppHeuristicResult.severity.getText
+                @if(yarnAppHeuristicResult.severity.getValue > 1){ <a href="@routes.Application.help()?topic=@helper.urlEncode(yarnAppHeuristicResult.heuristicName)">[Explain]</a> }
+              </th>
+            </tr></thead>
+            <tbody>
+              @for(yarnAppHeuristicResultDetail <- yarnAppHeuristicResult.yarnAppHeuristicResultDetails) {
+                <tr>
+                  <td>@yarnAppHeuristicResultDetail.name</td>
+                  <td>@yarnAppHeuristicResultDetail.value</td>
+                </tr>
+                @if(yarnAppHeuristicResultDetail.details != null) {
                   <tr>
-                    @for(cell <- line) {
-                      <td>@cell</td>
-                    }
+                    <td colspan="2"><div class="stacktrace">@yarnAppHeuristicResultDetail.details</div></td>
                   </tr>
                 }
-              </tbody>
-            </table>
-          }
+              }
+            </tbody>
+          </table>
         </a>
       }
     </div>
diff --git a/app/views/results/jobHistoryResults.scala.html b/app/views/results/jobHistoryResults.scala.html
index b681979c0..dc5126e58 100644
--- a/app/views/results/jobHistoryResults.scala.html
+++ b/app/views/results/jobHistoryResults.scala.html
@@ -14,7 +14,7 @@
 * the License.
 *@
 
-@(jobUrl: String, results: java.util.Map[String, java.util.List[model.JobResult]], maxStages: Int,
+@(jobDefId: String, results: java.util.Map[IdUrlPair, java.util.List[models.AppResult]], maxStages: Int,
     flowExecTimeList:java.util.List[Long])
 
 @import com.linkedin.drelephant.analysis.Severity
@@ -22,19 +22,19 @@
 @getSeverityColor(severity : Severity) = @{
   var color: String  = "#5cb85c"; // LOW or NONE
 
-  if(severity.getText().equalsIgnoreCase("CRITICAL")) {
-    color = "#d9534f";
-  } else if(severity.getText().equalsIgnoreCase("SEVERE")) {
-    color = "#e4804e";
-  } else if(severity.getText().equalsIgnoreCase("MODERATE")) {
-    color = "#f0ad4e";
+  if(severity.getText.equalsIgnoreCase("CRITICAL")) {
+    color = "#d9534f"
+  } else if(severity.getText.equalsIgnoreCase("SEVERE")) {
+    color = "#e4804e"
+  } else if(severity.getText.equalsIgnoreCase("MODERATE")) {
+    color = "#f0ad4e"
   }
 
   color
 }
 
 @if(results != null && results.nonEmpty) {
-  @tags.panel(){ Job History Results: <a href="@jobUrl" style="font-size:14px; color:#083d8d">@jobUrl</a>} {
+  @tags.panel(){ Job History Results: <a href="@jobDefId" style="font-size:14px; color:#083d8d">@jobDefId</a>} {
     <script src="@routes.Assets.at("js/jobhistoryform.js")" type="text/javascript"></script>
     <script src="@routes.Assets.at("js/graphutility.js")" type="text/javascript"></script>
 
@@ -64,32 +64,31 @@
           </thead>
 
           <tbody>
-          @for((flowExecUrl, jobs) <- results) {
+          @for((flowExecPair, jobs) <- results) {
             <tr>
 
               <!-- The First column, execution time -->
               <td style="text-align:center">
-                <a class="exectime" href='@flowExecUrl' data-toggle='tooltip' title='@flowExecUrl'>Loading...</a>
+                <a class="exectime" href='@flowExecPair.getUrl' data-toggle='tooltip' title='@flowExecPair.getUrl'>Loading...</a>
               </td>
 
               <!-- The remaining columns -->
               @for(i <- 1 to maxStages) {
                 <td>
                 @if(i <= jobs.length) {
-                  @for((heuristicResult, jobCount) <- jobs(i-1).heuristicResults.zipWithIndex) {
-                    <a class="hasTooltip" href='@routes.Application.search()?id=@heuristicResult.job.jobId'
-                    style='color: @getSeverityColor(heuristicResult.severity);'>&#9673;
+                  @for((appHeuristicResult, jobCount) <- jobs(i-1).yarnAppHeuristicResults.zipWithIndex) {
+                    <a class="hasTooltip" href='@routes.Application.search()?id=@appHeuristicResult.yarnAppResult.id'
+                    style='color: @getSeverityColor(appHeuristicResult.severity);'>&#9673;
                       <!-- Table Tooltip Content -->
                       <div id="tooltip-div">
-                        <p><b>@heuristicResult.job.jobId</b></p>
-                        <p><b>@heuristicResult.analysisName</b></p>
+                        <p><b>@appHeuristicResult.yarnAppResult.id</b></p>
+                        <p><b>@appHeuristicResult.heuristicName</b></p>
                         <table class="list-group-item-text table table-condensed" style="color:black;">
                           <tbody>
-                          @for(dataArray <- heuristicResult.getDataArray) {
+                          @for(appHeuristicResultDetail <- appHeuristicResult.yarnAppHeuristicResultDetails) {
                             <tr>
-                            @for(cell <- dataArray) {
-                              <td>@cell</td>
-                            }
+                              <td>@appHeuristicResultDetail.name</td>
+                              <td>@appHeuristicResultDetail.value</td>
                             </tr>
                           }
                           </tbody>
diff --git a/app/views/results/searchResults.scala.html b/app/views/results/searchResults.scala.html
index 92aea5a05..3985e145a 100644
--- a/app/views/results/searchResults.scala.html
+++ b/app/views/results/searchResults.scala.html
@@ -14,7 +14,7 @@
 * the License.
 *@
 
-@(title: String, results: java.util.List[model.JobResult])
+@(title: String, results: java.util.List[models.AppResult])
 
 @*
 * The layout of the job results.
@@ -27,12 +27,14 @@
   <div class="panel-heading">
     <h3 class="panel-title">@title</h3>
   </div>
+  @if(results != null && results.nonEmpty) {
   <div class="list-group">
     @for(result <- results) {
       <a class="list-group-item list-group-item-@result.severity.getBootstrapColor"
-         href="@routes.Application.search()?id=@result.jobId">
+         href="@routes.Application.search()?id=@result.id">
         @tags.jobSummary(result)
       </a>
     }
   </div>
+  }
 </div>
diff --git a/app/views/tags/jobHeader.scala.html b/app/views/tags/jobHeader.scala.html
index cd5222f46..630fca864 100644
--- a/app/views/tags/jobHeader.scala.html
+++ b/app/views/tags/jobHeader.scala.html
@@ -14,7 +14,7 @@
 * the License.
 *@
 
-@(result: model.JobResult)
+@(result: models.AppResult)
 
 @*
 * The job header includes the following info,
@@ -23,16 +23,16 @@
 * The job id
 * And the analysis time
 *
-* @param result The job of type JobResult
+* @param result The job of type AppResult
 *@
 
 <p class="list-group-item-heading">
-  <div class="left"><h4 class="list-group-item-heading">[@result.username] [@result.jobType] @result.jobId</h4></div>
-  <div id="time_@result.jobId" class="right"></div>
-  <div>@result.jobName</div>
+  <div class="left"><h4 class="list-group-item-heading">[@result.username] [@result.jobType] @result.id</h4></div>
+  <div id="time_@result.id" class="right"></div>
+  <div>@result.name</div>
 </p>
 
 <script type="text/javascript">
-  var ts = new Date(@result.analysisTime);
-  document.getElementById("time_@result.jobId").innerHTML = ts.toString();
-</script>
\ No newline at end of file
+  var ts = new Date(@result.finishTime.getTime);
+  document.getElementById("time_@result.id").innerHTML = ts.toString();
+</script>
diff --git a/app/views/tags/jobSummary.scala.html b/app/views/tags/jobSummary.scala.html
index 8ff914b82..871122c60 100644
--- a/app/views/tags/jobSummary.scala.html
+++ b/app/views/tags/jobSummary.scala.html
@@ -14,18 +14,18 @@
 * the License.
 *@
 
-@(result: model.JobResult)
+@(result: models.AppResult)
 
 @*
 * Displays a brief info of the job.
 * The job block includes, the job header and the heuristic info.
 *
-* @param result The job of type JobResult
+* @param result The job of type AppResult
 *@
 
 @tags.jobHeader(result)
 <p>
-  @for(heuristicResult <- result.heuristicResults) {
-    <span class="label label-@heuristicResult.severity.getBootstrapColor()">@heuristicResult.analysisName</span>
+  @for(appHeuristicResult <- result.yarnAppHeuristicResults) {
+    <span class="label label-@appHeuristicResult.severity.getBootstrapColor">@appHeuristicResult.heuristicName</span>
   }
 </p>
\ No newline at end of file
diff --git a/build.sbt b/build.sbt
index 10c5ab6d8..40757e5e6 100644
--- a/build.sbt
+++ b/build.sbt
@@ -27,7 +27,7 @@ libraryDependencies ++= Seq(
   javaEbean,
   cache,
   "commons-io" % "commons-io" % "2.4",
-  "mysql" % "mysql-connector-java" % "5.1.22",
+  "mysql" % "mysql-connector-java" % "5.1.38",
   "org.apache.hadoop" % "hadoop-auth" % "2.3.0",
   "org.apache.commons" % "commons-email" % "1.3.2",
   "org.codehaus.jackson" % "jackson-mapper-asl" % "1.7.3",
diff --git a/conf/application.conf b/conf/application.conf
index 8a0cd098e..c6621e47b 100644
--- a/conf/application.conf
+++ b/conf/application.conf
@@ -70,12 +70,7 @@ evolutionplugin=disabled
 # You can declare as many Ebean servers as you want.
 # By convention, the default server is named `default`
 #
-ebean.default="model.*"
-
-# Logger
-# ~~~~~
-# You can also configure logback (http://logback.qos.ch/),
-# by providing an application-logger.xml file in the conf directory.
+ebean.default="models.*"
 
 # Root logger:
 logger.root=ERROR
@@ -92,4 +87,3 @@ smtp.port=25
 smtp.from="azkaban-noreply@linkedin.com"
 # smtp.user=azkaban-noreply
 # smtp.password=
-
diff --git a/conf/routes b/conf/routes
index 0e3c24d4a..5fe00c8a6 100644
--- a/conf/routes
+++ b/conf/routes
@@ -20,7 +20,6 @@
 
 # Application calls
 GET     /                           controllers.Application.dashboard()
-GET     /dashboard                  controllers.Application.dashboard()
 GET     /help                       controllers.Application.help()
 GET     /email                      controllers.Application.testEmail()
 GET     /search                     controllers.Application.search()
@@ -29,13 +28,13 @@ GET     /flowhistory                controllers.Application.flowHistory()
 GET     /jobhistory                 controllers.Application.jobHistory()
 
 # Rest calls
-GET     /rest/job                   controllers.Application.restJobResult(id: String)
-GET     /rest/jobexec               controllers.Application.restJobExecResult(url: String)
-GET     /rest/flowexec              controllers.Application.restFlowExecResult(url: String)
+GET     /rest/job                   controllers.Application.restAppResult(id: String)
+GET     /rest/jobexec               controllers.Application.restJobExecResult(id: String)
+GET     /rest/flowexec              controllers.Application.restFlowExecResult(id: String)
 GET     /rest/search                controllers.Application.restSearch()
 GET     /rest/compare               controllers.Application.restCompare()
-GET     /rest/flowgraphdata         controllers.Application.restFlowGraphData(url: String)
-GET     /rest/jobgraphdata          controllers.Application.restJobGraphData(url: String)
+GET     /rest/flowgraphdata         controllers.Application.restFlowGraphData(id: String)
+GET     /rest/jobgraphdata          controllers.Application.restJobGraphData(id: String)
 
 
 # Map static resources from the /public folder to the /assets URL path
diff --git a/project/plugins.sbt b/project/plugins.sbt
index 94816f9a8..e8f765ffb 100644
--- a/project/plugins.sbt
+++ b/project/plugins.sbt
@@ -20,4 +20,4 @@ logLevel := Level.Warn
 resolvers += "Typesafe repository" at "http://repo.typesafe.com/typesafe/releases/"
 
 // Use the Play sbt plugin for Play projects
-addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.2.2")
+addSbtPlugin("com.typesafe.play" % "sbt-plugin" % "2.2.2")
\ No newline at end of file
diff --git a/public/css/main.css b/public/css/main.css
index c7fc0aee4..337e3faa5 100644
--- a/public/css/main.css
+++ b/public/css/main.css
@@ -15,17 +15,19 @@
  */
 
 .left-table {
-    display:table;
+  display:table;
 }
 .left-table tr {
-    display:table-row
+  display:table-row;
+  white-space:normal;
 }
 .left-table tr td {
-    display: table-cell;
-    white-space: pre;
+  display: table-cell;
+  white-space: pre;
+  word-wrap:break-word;
 }
 .left-table tr td:last-child{
-    width: 100%;
+  width: 100%;
 }​
 
 
@@ -118,10 +120,12 @@ body {
   font-family: "HelveticaNeue-Light", "Helvetica Neue Light", "Helvetica Neue", Arial Narrow, Helvetica, sans-serif;
 }
 .table-responsive {
-  overflow-x: auto
+  overflow-x: auto;
 }
 .table-responsive > .table > thead > tr > th {
   text-align:center;
+  vertical-align:top;
+  word-wrap:break-word;
 }
 .table-responsive > .table > tbody > tr > td {
   text-align:left;
@@ -156,4 +160,17 @@ body {
 .hasTooltip:hover div {
   display:block;
   z-index:1;
+}
+
+.stacktrace{
+  /* For Firefox */
+  white-space: pre-wrap;
+  word-break: break-all;
+
+  /* For Chrome and IE */
+  word-wrap: break-word;
+
+  font-size:11px;
+  font-family:monospace;
+  color:brown;
 }
\ No newline at end of file
diff --git a/public/js/flowhistoryform.js b/public/js/flowhistoryform.js
index ad261d4cd..4d2da68e5 100644
--- a/public/js/flowhistoryform.js
+++ b/public/js/flowhistoryform.js
@@ -17,10 +17,10 @@
 $(document).ready(function(){
 
   /* Plot graph for data obtained from ajax call */
-  $.getJSON('/rest/flowgraphdata?url=' + queryString()['flow-url'], function(data) {
+  $.getJSON('/rest/flowgraphdata?id=' + queryString()['flow-def-id'], function(data) {
     updateExecTimezone(data);
 
-    // Compute the jobDefUrl list such that the job numbers in the tooltip match the corresponding job in the table.
+    // Compute the jobDefId list such that the job numbers in the tooltip match the corresponding job in the table.
     var jobDefList = [];
     for (var i = data.length - 1 ; i >=0 ; i--) {
       for (var j = 0; j < data[i].jobscores.length; j++) {
@@ -94,9 +94,10 @@ function getGraphTooltipContent(record, jobDefList) {
       }
 
       var jobDefUrl = record.jobscores[index]['jobdefurl'];
-      var jobLink = "/jobhistory?job-url=" + encodeURIComponent(jobDefUrl);
+      //var jobLink = "/jobhistory?job-def-id=" + encodeURIComponent(jobDefUrl);
+      var jobExecUrl = record.jobscores[index]['jobexecurl'];
       var jobRef = document.createElement("a");
-      jobRef.setAttribute("href", jobLink);
+      jobRef.setAttribute("href", jobExecUrl);
       jobRef.appendChild(document.createTextNode("Job " + (jobDefList.indexOf(jobDefUrl) + 1)));
 
       var tableCell1 = document.createElement("td");
diff --git a/public/js/graphutility.js b/public/js/graphutility.js
index 8d30c589d..b1731c8ae 100644
--- a/public/js/graphutility.js
+++ b/public/js/graphutility.js
@@ -196,16 +196,24 @@ function plotter(graphData, jobDefList) {
         .attr("transform", "translate(" + xRange(record.flowtime) + "," + yRange(record.score) +")");
 
     // Set position of tooltip.
-    var x = xRange(record.flowtime) - (tooltipWidth) - 20;
-    var y = yRange(record.score) - tooltip.select("body").style("height").replace("px", "")/2 - 10;
+    var x = xRange(record.flowtime) - (tooltipWidth) - 10;
+    var y = yRange(record.score) - tooltip.select("body").style("height").replace("px", "")/2;
+
+    // Don't let the tooltip cross the left margin
     if (x < MARGINS.left) {
-      x = xRange(record.flowtime) + 20;
+      x = xRange(record.flowtime) + 10;
+    }
+
+    // Don't let the tooltip cross the bottom margin
+    if ((yRange(record.score) + tooltip.select("body").style("height").replace("px", "")/2) >= yRange(0)) {
+      y = yRange(record.score) - tooltip.select("body").style("height").replace("px", "") - 10;
     }
+
     tooltip.select("foreignObject")
         .attr("height", tooltip.select("body").style("height"));
     tooltip.select("foreignObject")
         .transition()
-        .duration(100)
+        .duration(75)
         .attr("transform", "translate(" + x + "," + y + ")");
   }
 }
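
A minimal Java transliteration of the clamping arithmetic above, since the intent is easier to see without the D3 plumbing. Every name and constant here (clampTooltip, marginLeft, yZero, the sample sizes in main) is illustrative; the real code operates on D3 selections and pixel strings.

    // Sketch of the tooltip bounds logic; names and values are illustrative.
    public final class TooltipClamp {

      /** Returns {x, y} for a tooltip attached to the point (px, py). */
      static double[] clampTooltip(double px, double py, double tooltipWidth,
          double tooltipHeight, double marginLeft, double yZero) {
        double x = px - tooltipWidth - 10;      // default: place left of the point
        double y = py - tooltipHeight / 2;      // vertically centered on the point

        if (x < marginLeft) {                   // would cross the left margin
          x = px + 10;                          // flip to the right of the point
        }
        if (py + tooltipHeight / 2 >= yZero) {  // would cross the bottom margin
          y = py - tooltipHeight - 10;          // move the box fully above the point
        }
        return new double[]{x, y};
      }

      public static void main(String[] args) {
        // A point near the left and bottom edges is flipped right and lifted up.
        double[] pos = clampTooltip(40, 380, 150, 60, 50, 400);
        System.out.println(pos[0] + ", " + pos[1]);  // 50.0, 310.0
      }
    }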
diff --git a/public/js/jobhistoryform.js b/public/js/jobhistoryform.js
index aba945700..4d0eba488 100644
--- a/public/js/jobhistoryform.js
+++ b/public/js/jobhistoryform.js
@@ -17,7 +17,7 @@
 $(document).ready(function(){
 
   /* Plot graph for data obtained from ajax call */
-  $.getJSON('/rest/jobgraphdata?url=' + queryString()['job-url'], function(data) {
+  $.getJSON('/rest/jobgraphdata?id=' + queryString()['job-def-id'], function(data) {
     updateExecTimezone(data);
     plotter(data, []);
   });
diff --git a/public/js/searchform.js b/public/js/searchform.js
index 65681bb90..24f96190e 100644
--- a/public/js/searchform.js
+++ b/public/js/searchform.js
@@ -19,9 +19,9 @@ $(document).ready(function(){
   var form = $("#search-form");
   var formSubmit = $("#submit-button");
 
-  var jobid = $("#form-job-id");
-  var flowurl = $("#form-flow-url");
-  var user = $("#form-user");
+  var jobId = $("#form-job-id");
+  var flowExecId = $("#form-flow-exec-id");
+  var user = $("#form-username");
   var jobtypeEnable = $("#form-job-type-enable");
   var jobtype = $("#form-job-type");
   var severityEnable = $("#form-severity-enable");
@@ -43,8 +43,8 @@ $(document).ready(function(){
   });
 
   var updateForm = function(){
-    if(jobid.val()) {
-      flowurl.prop('disabled', true);
+    if(jobId.val()) {
+      flowExecId.prop('disabled', true);
       user.prop('disabled', true);
       severity.prop('disabled', true);
       analysis.prop('disabled', true);
@@ -54,8 +54,8 @@ $(document).ready(function(){
       datetimeEnable.prop('disabled', true);
       finishTimeBeginDate.prop('disabled', true);
       finishTimeEndDate.prop('disabled', true);
-    } else if(flowurl.val()) {
-      jobid.prop('disabled', true);
+    } else if(flowExecId.val()) {
+      jobId.prop('disabled', true);
       user.prop('disabled', true);
       severity.prop('disabled', true);
       analysis.prop('disabled', true);
@@ -67,8 +67,8 @@ $(document).ready(function(){
       finishTimeEndDate.prop('disabled', true);
     }
     else{
-      jobid.prop('disabled', false);
-      flowurl.prop('disabled', false);
+      jobId.prop('disabled', false);
+      flowExecId.prop('disabled', false);
       jobtypeEnable.prop('disabled', false);
       severityEnable.prop('disabled', false);
       datetimeEnable.prop('disabled', false);
@@ -97,8 +97,8 @@ $(document).ready(function(){
       }
     }
   }
-  jobid.on("propertychange keyup input paste", updateForm);
-  flowurl.on("propertychange keyup input paste", updateForm);
+  jobId.on("propertychange keyup input paste", updateForm);
+  flowExecId.on("propertychange keyup input paste", updateForm);
   jobtypeEnable.change(updateForm);
   severityEnable.change(updateForm);
   datetimeEnable.change(updateForm);
diff --git a/test/com/linkedin/drelephant/analysis/SeverityTest.java b/test/com/linkedin/drelephant/analysis/SeverityTest.java
new file mode 100644
index 000000000..ecc6758c4
--- /dev/null
+++ b/test/com/linkedin/drelephant/analysis/SeverityTest.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.analysis;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+
+public class SeverityTest {
+
+  @Test
+  public void testSeverityMax() {
+    assertEquals(Severity.CRITICAL, Severity.max(Severity.CRITICAL));
+    assertEquals(Severity.CRITICAL, Severity.max(Severity.CRITICAL, Severity.SEVERE));
+    assertEquals(Severity.CRITICAL, Severity.max(Severity.LOW, Severity.LOW, Severity.CRITICAL));
+  }
+
+  @Test
+  public void testSeverityMin() {
+    assertEquals(Severity.NONE, Severity.min(Severity.NONE, Severity.LOW));
+    assertEquals(Severity.LOW, Severity.min(Severity.LOW, Severity.LOW));
+  }
+
+  @Test
+  public void testSeverityAscending() {
+    assertEquals(Severity.CRITICAL, Severity.getSeverityAscending(8, 2, 4, 6, 8));
+    assertEquals(Severity.SEVERE, Severity.getSeverityAscending(10, 2, 4, 6, 12));
+  }
+
+  @Test
+  public void testSeverityDescending() {
+    assertEquals(Severity.CRITICAL, Severity.getSeverityDescending(2, 10, 8, 4, 2));
+    assertEquals(Severity.MODERATE, Severity.getSeverityDescending(5, 10, 8, 4, 2));
+  }
+}
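
The threshold assertions above read more easily with the mapping spelled out. Below is a self-contained sketch of what getSeverityAscending and getSeverityDescending appear to compute, assuming the usual "highest level whose threshold the value reaches" semantics; the Level enum and method bodies are illustrative, not Dr. Elephant's actual code.

    // Sketch of ascending/descending threshold mapping, consistent with the asserts above.
    enum Level { NONE, LOW, MODERATE, SEVERE, CRITICAL }

    final class SeveritySketch {
      static Level ascending(long value, long low, long mod, long sev, long crit) {
        if (value >= crit) return Level.CRITICAL;
        if (value >= sev)  return Level.SEVERE;
        if (value >= mod)  return Level.MODERATE;
        if (value >= low)  return Level.LOW;
        return Level.NONE;
      }

      static Level descending(long value, long low, long mod, long sev, long crit) {
        if (value <= crit) return Level.CRITICAL;
        if (value <= sev)  return Level.SEVERE;
        if (value <= mod)  return Level.MODERATE;
        if (value <= low)  return Level.LOW;
        return Level.NONE;
      }

      public static void main(String[] args) {
        System.out.println(ascending(8, 2, 4, 6, 8));    // CRITICAL, as asserted
        System.out.println(ascending(10, 2, 4, 6, 12));  // SEVERE
        System.out.println(descending(5, 10, 8, 4, 2));  // MODERATE
      }
    }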
diff --git a/test/com/linkedin/drelephant/configurations/fetcher/FetcherConfigurationTest.java b/test/com/linkedin/drelephant/configurations/fetcher/FetcherConfigurationTest.java
new file mode 100644
index 000000000..552e4bb49
--- /dev/null
+++ b/test/com/linkedin/drelephant/configurations/fetcher/FetcherConfigurationTest.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.configurations.fetcher;
+
+import java.io.IOException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+
+
+public class FetcherConfigurationTest {
+
+  private static Document document1 = null;
+  private static Document document2 = null;
+  private static Document document3 = null;
+  private static Document document4 = null;
+
+  @BeforeClass
+  public static void runBeforeClass() {
+    try {
+      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+      DocumentBuilder builder = factory.newDocumentBuilder();
+      document1 = builder.parse(
+          FetcherConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/fetcher/FetcherConfTest1.xml"));
+      document2 = builder.parse(
+          FetcherConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/fetcher/FetcherConfTest2.xml"));
+      document3 = builder.parse(
+          FetcherConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/fetcher/FetcherConfTest3.xml"));
+      document4 = builder.parse(
+          FetcherConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/fetcher/FetcherConfTest4.xml"));
+    } catch (ParserConfigurationException e) {
+      throw new RuntimeException("XML Parser could not be created.", e);
+    } catch (SAXException e) {
+      throw new RuntimeException("Test files are not properly formed", e);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to read test files ", e);
+    }
+  }
+
+  @Rule
+  public ExpectedException expectedEx = ExpectedException.none();
+
+  /**
+   *  Correctly configured fetcher
+   */
+  @Test
+  public void testParseFetcherConf1() {
+    FetcherConfiguration fetcherConf = new FetcherConfiguration(document1.getDocumentElement());
+    assertEquals(fetcherConf.getFetchersConfigurationData().size(), 2);
+  }
+
+  /**
+   *  No classname tag
+   */
+  @Test
+  public void testParseFetcherConf2() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'classname' in fetcher 2");
+    FetcherConfiguration fetcherConf = new FetcherConfiguration(document2.getDocumentElement());
+  }
+
+  /**
+   *  Empty classname tag
+   */
+  @Test
+  public void testParseFetcherConf3() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("Empty tag 'classname' in fetcher 1");
+    FetcherConfiguration fetcherConf = new FetcherConfiguration(document3.getDocumentElement());
+  }
+
+  /**
+   *  No applicationtype tag
+   */
+  @Test
+  public void testParseFetcherConf4() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag or invalid tag 'applicationtype' in fetcher 1"
+        + " classname com.linkedin.drelephant.mapreduce.MapReduceFetcherHadoop2");
+    FetcherConfiguration fetcherConf = new FetcherConfiguration(document4.getDocumentElement());
+  }
+}
+
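
For orientation, the four fixtures exercise tag-presence validation on the parsed document. The sketch below reconstructs that kind of check purely from the expected messages; FetcherConfiguration's real parsing code is not part of this patch, so treat every detail here as an assumption.

    // Hedged sketch of fetcher-conf validation, reconstructed from the expected messages.
    import java.io.ByteArrayInputStream;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.w3c.dom.NodeList;

    final class FetcherConfSketch {
      static void validate(Element root) {
        NodeList fetchers = root.getElementsByTagName("fetcher");
        for (int i = 0; i < fetchers.getLength(); i++) {
          Element fetcher = (Element) fetchers.item(i);
          int n = i + 1;  // the messages number fetchers starting at 1
          NodeList cls = fetcher.getElementsByTagName("classname");
          if (cls.getLength() == 0) {
            throw new RuntimeException("No tag 'classname' in fetcher " + n);
          }
          String className = cls.item(0).getTextContent().trim();
          if (className.isEmpty()) {
            throw new RuntimeException("Empty tag 'classname' in fetcher " + n);
          }
          if (fetcher.getElementsByTagName("applicationtype").getLength() == 0) {
            throw new RuntimeException("No tag or invalid tag 'applicationtype' in fetcher "
                + n + " classname " + className);
          }
        }
      }

      public static void main(String[] args) throws Exception {
        String xml = "<fetchers><fetcher>"
            + "<applicationtype>mapreduce</applicationtype>"
            + "</fetcher></fetchers>";  // classname deliberately missing
        Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
            .parse(new ByteArrayInputStream(xml.getBytes("UTF-8")));
        validate(doc.getDocumentElement());  // throws: No tag 'classname' in fetcher 1
      }
    }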
diff --git a/test/com/linkedin/drelephant/configurations/heuristic/HeuristicConfigurationTest.java b/test/com/linkedin/drelephant/configurations/heuristic/HeuristicConfigurationTest.java
new file mode 100644
index 000000000..bc9444b7a
--- /dev/null
+++ b/test/com/linkedin/drelephant/configurations/heuristic/HeuristicConfigurationTest.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.configurations.heuristic;
+
+import java.io.IOException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+
+
+public class HeuristicConfigurationTest {
+
+  private static Document document1 = null;
+  private static Document document2 = null;
+  private static Document document3 = null;
+  private static Document document4 = null;
+  private static Document document5 = null;
+
+  @BeforeClass
+  public static void runBeforeClass() {
+    try {
+      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+      DocumentBuilder builder = factory.newDocumentBuilder();
+      document1 = builder.parse(HeuristicConfigurationTest.class.getClassLoader()
+              .getResourceAsStream("configurations/heuristic/HeuristicConfTest1.xml"));
+      document2 = builder.parse(
+          HeuristicConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/heuristic/HeuristicConfTest2.xml"));
+      document3 = builder.parse(
+          HeuristicConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/heuristic/HeuristicConfTest3.xml"));
+      document4 = builder.parse(
+          HeuristicConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/heuristic/HeuristicConfTest4.xml"));
+      document5 = builder.parse(
+          HeuristicConfigurationTest.class.getClassLoader().getResourceAsStream(
+              "configurations/heuristic/HeuristicConfTest5.xml"));
+    } catch (ParserConfigurationException e) {
+      throw new RuntimeException("XML Parser could not be created.", e);
+    } catch (SAXException e) {
+      throw new RuntimeException("Test files are not properly formed", e);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to read test files ", e);
+    }
+  }
+
+  @Rule
+  public ExpectedException expectedEx = ExpectedException.none();
+
+  /**
+   *  Correctly configured heuristic
+   */
+  @Test
+  public void testParseHeuristicConf1() {
+    HeuristicConfiguration heuristicConf = new HeuristicConfiguration(document1.getDocumentElement());
+    assertEquals(heuristicConf.getHeuristicsConfigurationData().size(), 3);
+  }
+
+  /**
+   * No classname tag
+   */
+  @Test
+  public void testParseHeuristicConf2() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'classname' in heuristic 1");
+    HeuristicConfiguration heuristicConf = new HeuristicConfiguration(document2.getDocumentElement());
+  }
+
+  /**
+   * No heuristic name tag
+   */
+  @Test
+  public void testParseHeuristicConf3() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'heuristicname' in heuristic 1 classname"
+        + " com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic");
+    HeuristicConfiguration heuristicConf = new HeuristicConfiguration(document3.getDocumentElement());
+  }
+
+  /**
+   * No view name tag
+   */
+  @Test
+  public void testParseHeuristicConf4() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'viewname' in heuristic 1 classname"
+        + " com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic");
+    HeuristicConfiguration heuristicConf = new HeuristicConfiguration(document4.getDocumentElement());
+  }
+
+  /**
+   * No application type tag
+   */
+  @Test
+  public void testParseHeuristicConf5() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag or invalid tag 'applicationtype' in heuristic 2 classname"
+      + " com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic");
+    HeuristicConfiguration heuristicConf = new HeuristicConfiguration(document5.getDocumentElement());
+  }
+}
+
diff --git a/test/com/linkedin/drelephant/configurations/jobtype/JobTypeConfigurationTest.java b/test/com/linkedin/drelephant/configurations/jobtype/JobTypeConfigurationTest.java
new file mode 100644
index 000000000..37d08e6d6
--- /dev/null
+++ b/test/com/linkedin/drelephant/configurations/jobtype/JobTypeConfigurationTest.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.configurations.jobtype;
+
+import java.io.IOException;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+import static org.junit.Assert.assertEquals;
+
+
+public class JobTypeConfigurationTest {
+
+  private static Document document1 = null;
+  private static Document document2 = null;
+  private static Document document3 = null;
+  private static Document document4 = null;
+  private static Document document5 = null;
+  private static Document document6 = null;
+
+  @BeforeClass
+  public static void runBeforeClass() {
+    try {
+      DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
+      DocumentBuilder builder = factory.newDocumentBuilder();
+      document1 = builder.parse(JobTypeConfigurationTest.class.getClassLoader()
+          .getResourceAsStream("configurations/jobtype/JobTypeConfTest1.xml"));
+      document2 = builder.parse(JobTypeConfigurationTest.class.getClassLoader()
+          .getResourceAsStream("configurations/jobtype/JobTypeConfTest2.xml"));
+      document3 = builder.parse(JobTypeConfigurationTest.class.getClassLoader()
+          .getResourceAsStream("configurations/jobtype/JobTypeConfTest3.xml"));
+      document4 = builder.parse(JobTypeConfigurationTest.class.getClassLoader()
+          .getResourceAsStream("configurations/jobtype/JobTypeConfTest4.xml"));
+      document5 = builder.parse(JobTypeConfigurationTest.class.getClassLoader()
+          .getResourceAsStream("configurations/jobtype/JobTypeConfTest5.xml"));
+      document6 = builder.parse(JobTypeConfigurationTest.class.getClassLoader()
+          .getResourceAsStream("configurations/jobtype/JobTypeConfTest6.xml"));
+    } catch (ParserConfigurationException e) {
+      throw new RuntimeException("XML Parser could not be created.", e);
+    } catch (SAXException e) {
+      throw new RuntimeException("Test files are not properly formed", e);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to read test files ", e);
+    }
+  }
+
+  @Rule
+  public ExpectedException expectedEx = ExpectedException.none();
+
+  /**
+   * Correctly configured job type
+   */
+  @Test
+  public void testParseJobTypeConf1() {
+    JobTypeConfiguration jobTypeConf = new JobTypeConfiguration(document1.getDocumentElement());
+    assertEquals(jobTypeConf.getAppTypeToJobTypeList().size(), 2);
+  }
+
+  /**
+   * No name tag
+   */
+  @Test
+  public void testParseJobTypeConf2() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'jobtype' in jobtype 3");
+    JobTypeConfiguration jobTypeConf = new JobTypeConfiguration(document2.getDocumentElement());
+  }
+
+  /**
+   * No conf tag
+   */
+  @Test
+  public void testParseJobTypeConf3() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'conf' in jobtype Spark");
+    JobTypeConfiguration jobTypeConf = new JobTypeConfiguration(document3.getDocumentElement());
+  }
+
+  /**
+   * No applicationtype tag
+   */
+  @Test
+  public void testParseJobTypeConf4() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("No tag 'applicationtype' in jobtype Pig");
+    JobTypeConfiguration jobTypeConf = new JobTypeConfiguration(document4.getDocumentElement());
+  }
+
+  /**
+   * Wrong pattern for job type
+   */
+  @Test
+  public void testParseJobTypeConf5() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("Error processing this pattern.  Pattern:[(voldemort) jobtype:Voldemort");
+    JobTypeConfiguration jobTypeConf = new JobTypeConfiguration(document5.getDocumentElement());
+  }
+
+  /**
+   * Multiple default types
+   */
+  @Test
+  public void testParseJobTypeConf6() {
+    expectedEx.expect(RuntimeException.class);
+    expectedEx.expectMessage("Each application type should have one and only one default job type. Duplicate default"
+        + " job type: jobType:Hive, for application type:mapreduce, isDefault:true, confName:hive.mapred.mode,"
+        + " confValue:.*. for application type: MAPREDUCE");
+    JobTypeConfiguration jobTypeConf = new JobTypeConfiguration(document6.getDocumentElement());
+  }
+}
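
testParseJobTypeConf6 pins down a uniqueness rule: each application type may declare at most one default job type. A minimal sketch of such a check follows, assuming a map from application type to its default; all names here are illustrative, and the real JobTypeConfiguration is not shown in this patch.

    // Sketch of the one-default-per-application-type rule.
    import java.util.HashMap;
    import java.util.Map;

    final class DefaultJobTypeCheck {
      static void registerDefault(Map<String, String> defaults, String appType, String jobType) {
        String previous = defaults.put(appType.toUpperCase(), jobType);
        if (previous != null) {
          throw new RuntimeException("Each application type should have one and only one"
              + " default job type. Duplicate default job type: " + jobType
              + " for application type: " + appType.toUpperCase());
        }
      }

      public static void main(String[] args) {
        Map<String, String> defaults = new HashMap<String, String>();
        registerDefault(defaults, "spark", "Spark");      // first default: fine
        registerDefault(defaults, "mapreduce", "Pig");    // first default: fine
        registerDefault(defaults, "mapreduce", "Hive");   // throws: duplicate default
      }
    }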
diff --git a/test/com/linkedin/drelephant/mapreduce/MapReduceFetcherHadoop2Test.java b/test/com/linkedin/drelephant/mapreduce/MapReduceFetcherHadoop2Test.java
new file mode 100644
index 000000000..b2d4e4406
--- /dev/null
+++ b/test/com/linkedin/drelephant/mapreduce/MapReduceFetcherHadoop2Test.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.mapreduce;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+
+public class MapReduceFetcherHadoop2Test {
+
+  @Test
+  public void testDiagnosticMatcher() {
+    Assert.assertEquals("Task[\\s\\u00A0]+(.*)[\\s\\u00A0]+failed[\\s\\u00A0]+([0-9])[\\s\\u00A0]+times[\\s\\u00A0]+",
+        ThreadContextMR2.getDiagnosticMatcher("Task task_1443068695259_9143_m_000475 failed 1 time")
+            .pattern().toString());
+
+    Assert.assertEquals(2, ThreadContextMR2.getDiagnosticMatcher("Task task_1443068695259_9143_m_000475 failed 1 time")
+        .groupCount());
+  }
+
+}
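
The asserted pattern is worth a standalone demonstration: the [\s\u00A0] classes tolerate the non-breaking spaces that show up in some ResourceManager diagnostics, and the two capture groups pull out the task id and the retry count. The input string below is illustrative.

    // Demonstrates the diagnostic regex asserted above on a conforming message.
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    final class DiagnosticRegexDemo {
      private static final Pattern P = Pattern.compile(
          "Task[\\s\\u00A0]+(.*)[\\s\\u00A0]+failed[\\s\\u00A0]+([0-9])[\\s\\u00A0]+times[\\s\\u00A0]+");

      public static void main(String[] args) {
        Matcher m = P.matcher("Task task_1443068695259_9143_m_000475 failed 3 times ");
        if (m.find()) {
          System.out.println(m.group(1));  // task_1443068695259_9143_m_000475
          System.out.println(m.group(2));  // 3
        }
      }
    }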
diff --git a/test/com/linkedin/drelephant/math/StatisticsTest.java b/test/com/linkedin/drelephant/math/StatisticsTest.java
new file mode 100644
index 000000000..64268bac1
--- /dev/null
+++ b/test/com/linkedin/drelephant/math/StatisticsTest.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package com.linkedin.drelephant.math;
+
+import java.util.ArrayList;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import static org.junit.Assert.assertEquals;
+
+
+public class StatisticsTest {
+
+  @Test
+  public void testAverage1() {
+    assertEquals(6, Statistics.average(new long[]{2, 4, 6, 8, 10}));
+    assertEquals(0, Statistics.average(new long[] {}));
+  }
+
+  @Test
+  public void testAverage2() {
+    ArrayList<Long> list1 = new ArrayList<Long>();
+    list1.add(2L);
+    list1.add(4L);
+    list1.add(6L);
+    list1.add(8L);
+    list1.add(10L);
+    assertEquals(6, Statistics.average(list1));
+
+    ArrayList<Long> list2 = new ArrayList<Long>();
+    assertEquals(0, Statistics.average(list2));
+  }
+
+  @Rule
+  public ExpectedException expectedEx = ExpectedException.none();
+
+  @Test
+  public void testMedian1() {
+    ArrayList<Long> list1 = new ArrayList<Long>();
+    expectedEx.expect(IllegalArgumentException.class);
+    expectedEx.expectMessage("Median of an empty list is not defined.");
+    Statistics.median(list1);
+  }
+
+  @Test
+  public void testMedian2() {
+    ArrayList<Long> list2 = new ArrayList<Long>();
+    list2.add(2L);
+    list2.add(4L);
+    list2.add(6L);
+    list2.add(8L);
+    assertEquals(5, Statistics.median(list2));
+
+    list2.add(15L);
+    assertEquals(6, Statistics.median(list2));
+  }
+
+  @Test
+  public void testDescribeFactor() {
+    assertEquals("", Statistics.describeFactor(0, 0, "test"));
+    assertEquals("(5.00test)", Statistics.describeFactor(10, 2, "test"));
+  }
+
+  @Test
+  public void testReadableTimespan() {
+    assertEquals("0 sec", Statistics.readableTimespan(0));
+    assertEquals("1 sec", Statistics.readableTimespan(1000));
+    assertEquals("1 min", Statistics.readableTimespan(60000));
+    assertEquals("1 hr", Statistics.readableTimespan(3600000));
+  }
+}
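
The median tests fix two behaviors: even-length lists yield the mean of the two middle values (so {2, 4, 6, 8} gives (4 + 6) / 2 = 5) and empty lists are rejected. Below is a sketch of a median that satisfies exactly these assertions; Statistics' actual implementation is not in this patch.

    // Sketch of the median contract pinned down by the tests above.
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    final class MedianSketch {
      static long median(List<Long> values) {
        if (values.isEmpty()) {
          throw new IllegalArgumentException("Median of an empty list is not defined.");
        }
        List<Long> sorted = new ArrayList<Long>(values);
        Collections.sort(sorted);
        int mid = sorted.size() / 2;
        if (sorted.size() % 2 == 1) {
          return sorted.get(mid);          // odd size: the middle element
        }
        return (sorted.get(mid - 1) + sorted.get(mid)) / 2;  // even size: (4 + 6) / 2 == 5
      }

      public static void main(String[] args) {
        System.out.println(median(Arrays.asList(2L, 4L, 6L, 8L)));       // 5
        System.out.println(median(Arrays.asList(2L, 4L, 6L, 8L, 15L)));  // 6
      }
    }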
diff --git a/test/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristicTest.java b/test/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristicTest.java
index a59b8a3d0..3587114d8 100644
--- a/test/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristicTest.java
+++ b/test/com/linkedin/drelephant/spark/heuristics/MemoryLimitHeuristicTest.java
@@ -2,6 +2,7 @@
 
 import com.linkedin.drelephant.analysis.ApplicationType;
 import com.linkedin.drelephant.analysis.HeuristicResult;
+import com.linkedin.drelephant.analysis.HeuristicResultDetails;
 import com.linkedin.drelephant.analysis.Severity;
 import com.linkedin.drelephant.spark.MockSparkApplicationData;
 import com.linkedin.drelephant.spark.data.SparkApplicationData;
@@ -60,9 +61,9 @@ public void testCombinedRules() {
 
   public void testMissingSparkDriverMemoryProperty() {
     HeuristicResult result = getJobresult(100, "1G", "700M", getPeakMemory(1.0d, 100, "1G"), SPARK_DRIVER_MEMORY);
-    for (String detail : result.getDetails()) {
-      if (detail.startsWith("\"Total driver memory allocated")) {
-        assertEquals("\"Total driver memory allocated\",\"700 MB\"", detail);
+    for (HeuristicResultDetails detail : result.getHeuristicResultDetails()) {
+      if (detail.getName().startsWith("\"Total driver memory allocated")) {
+        assertEquals("\"Total driver memory allocated\",\"700 MB\"", detail.getName());
       }
     }
   }
diff --git a/test/com/linkedin/drelephant/util/UtilsTest.java b/test/com/linkedin/drelephant/util/UtilsTest.java
index 20c930816..f2e175231 100644
--- a/test/com/linkedin/drelephant/util/UtilsTest.java
+++ b/test/com/linkedin/drelephant/util/UtilsTest.java
@@ -16,17 +16,22 @@
 
 package com.linkedin.drelephant.util;
 
-import com.linkedin.drelephant.analysis.ApplicationType;
+
 import java.util.HashMap;
 import java.util.Map;
-import junit.framework.TestCase;
+
+import org.junit.Test;
+import org.w3c.dom.Document;
+
+import static org.junit.Assert.assertEquals;
 
 
 /**
  * This class tests the Utils class
- *
  */
-public class UtilsTest extends TestCase {
+public class UtilsTest {
+
+  @Test
   public void testParseJavaOptions() {
     Map<String, String> options1 = Utils.parseJavaOptions("-Dfoo=bar");
     assertEquals(1, options1.size());
@@ -39,18 +44,21 @@ public void testParseJavaOptions() {
     assertEquals("bar3", options2.get("foo3"));
   }
 
+  @Test
   public void testGetParam() {
     Map<String, String> paramMap = new HashMap<String, String>();
     paramMap.put("test_severity_1", "10, 50, 100, 200");
     paramMap.put("test_severity_2", "2, 4, 8");
-    paramMap.put("test_param_1", "2!");
+    paramMap.put("test_param_1", "2&");
     paramMap.put("test_param_2", "2");
+    paramMap.put("test_param_3", "");
+    paramMap.put("test_param_4", null);
 
     double limits1[] = Utils.getParam(paramMap.get("test_severity_1"), 4);
-    assertEquals(10d, limits1[0]);
-    assertEquals(50d, limits1[1]);
-    assertEquals(100d, limits1[2]);
-    assertEquals(200d, limits1[3]);
+    assertEquals(10d, limits1[0], 0);
+    assertEquals(50d, limits1[1], 0);
+    assertEquals(100d, limits1[2], 0);
+    assertEquals(200d, limits1[3], 0);
 
     double limits2[] = Utils.getParam(paramMap.get("test_severity_2"), 4);
     assertEquals(null, limits2);
@@ -59,7 +67,28 @@ public void testGetParam() {
     assertEquals(null, limits3);
 
     double limits4[] = Utils.getParam(paramMap.get("test_param_2"), 1);
-    assertEquals(2d, limits4[0]);
+    assertEquals(2d, limits4[0], 0);
+
+    double limits5[] = Utils.getParam(paramMap.get("test_param_3"), 1);
+    assertEquals(null, limits5);
+
+    double limits6[] = Utils.getParam(paramMap.get("test_param_4"), 1);
+    assertEquals(null, limits6);
+  }
+
+  @Test
+  public void testCommaSeparated() {
+    String commaSeparated1 = Utils.commaSeparated("foo");
+    assertEquals("foo", commaSeparated1);
+
+    String commaSeparated2 = Utils.commaSeparated("foo", "bar", "");
+    assertEquals("foo,bar", commaSeparated2);
+
+    String commaSeparated3 = Utils.commaSeparated("foo", "bar", null);
+    assertEquals("foo,bar", commaSeparated3);
+
+    String commaSeparated4 = Utils.commaSeparated();
+    assertEquals("", commaSeparated4);
   }
 
 }
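
The new assertions imply two contracts: getParam returns null unless the string splits into exactly the requested number of numeric values, and commaSeparated drops null and empty parts. A hedged sketch of both follows; Utils' real code is not shown in this hunk, so the bodies are reconstructions from the asserts alone.

    // Sketch of the getParam and commaSeparated contracts exercised above.
    import java.util.Arrays;

    final class UtilsContractSketch {
      static double[] getParam(String raw, int expected) {
        if (raw == null || raw.trim().isEmpty()) {
          return null;                        // "" and null -> null
        }
        String[] parts = raw.split(",");
        if (parts.length != expected) {
          return null;                        // "2, 4, 8" requested as 4 values -> null
        }
        double[] result = new double[expected];
        try {
          for (int i = 0; i < expected; i++) {
            result[i] = Double.parseDouble(parts[i].trim());
          }
        } catch (NumberFormatException e) {
          return null;                        // "2&" -> null
        }
        return result;
      }

      static String commaSeparated(String... parts) {
        StringBuilder sb = new StringBuilder();
        for (String part : parts) {
          if (part == null || part.isEmpty()) {
            continue;                         // skip "" and null entries
          }
          if (sb.length() > 0) {
            sb.append(',');
          }
          sb.append(part);
        }
        return sb.toString();
      }

      public static void main(String[] args) {
        System.out.println(Arrays.toString(getParam("10, 50, 100, 200", 4)));
        System.out.println(getParam("2, 4, 8", 4));              // null
        System.out.println(commaSeparated("foo", "bar", null));  // foo,bar
      }
    }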
diff --git a/test/controllers/ApplicationTest.java b/test/controllers/ApplicationTest.java
new file mode 100644
index 000000000..667a0e96a
--- /dev/null
+++ b/test/controllers/ApplicationTest.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2015 LinkedIn Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package controllers;
+
+import org.junit.Test;
+import play.api.mvc.Content;
+import play.test.WithApplication;
+import views.html.page.homePage;
+import views.html.results.searchResults;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+
+public class ApplicationTest extends WithApplication {
+
+  @Test
+  public void testRenderHomePage() {
+    Content html = homePage.render(5, 2, 3, searchResults.render("Latest analysis", null));
+    assertEquals("text/html", html.contentType());
+    assertTrue(html.body().contains("Hello there, I've been busy!"));
+    assertTrue(html.body().contains("I looked through <b>5</b> jobs today."));
+    assertTrue(html.body().contains("About <b>2</b> of them could use some tuning."));
+    assertTrue(html.body().contains("About <b>3</b> of them need some serious attention!"));
+  }
+
+  @Test
+  public void testRenderSearch() {
+    Content html = searchResults.render("Latest analysis", null);
+    assertEquals("text/html", html.contentType());
+    assertTrue(html.body().contains("Latest analysis"));
+  }
+
+}
diff --git a/test/resources/configurations/fetcher/FetcherConfTest1.xml b/test/resources/configurations/fetcher/FetcherConfTest1.xml
new file mode 100644
index 000000000..16697b292
--- /dev/null
+++ b/test/resources/configurations/fetcher/FetcherConfTest1.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<fetchers>
+  <fetcher>
+    <applicationtype>mapreduce</applicationtype>
+    <classname>com.linkedin.drelephant.mapreduce.MapReduceFetcherHadoop2</classname>
+  </fetcher>
+  <fetcher>
+    <applicationtype>spark</applicationtype>
+    <classname>org.apache.spark.deploy.history.SparkFSFetcher</classname>
+  </fetcher>
+</fetchers>
diff --git a/test/resources/configurations/fetcher/FetcherConfTest2.xml b/test/resources/configurations/fetcher/FetcherConfTest2.xml
new file mode 100644
index 000000000..2435e3d89
--- /dev/null
+++ b/test/resources/configurations/fetcher/FetcherConfTest2.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<fetchers>
+  <fetcher>
+    <applicationtype>mapreduce</applicationtype>
+    <classname>com.linkedin.drelephant.mapreduce.MapReduceFetcherHadoop2</classname>
+  </fetcher>
+  <fetcher>
+    <applicationtype>spark</applicationtype>
+    <abc>org.apache.spark.deploy.history.SparkFSFetcher</abc>
+  </fetcher>
+</fetchers>
diff --git a/test/resources/configurations/fetcher/FetcherConfTest3.xml b/test/resources/configurations/fetcher/FetcherConfTest3.xml
new file mode 100644
index 000000000..641dffebf
--- /dev/null
+++ b/test/resources/configurations/fetcher/FetcherConfTest3.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<fetchers>
+  <fetcher>
+    <applicationtype>mapreduce</applicationtype>
+    <classname></classname>
+  </fetcher>
+</fetchers>
diff --git a/test/resources/configurations/fetcher/FetcherConfTest4.xml b/test/resources/configurations/fetcher/FetcherConfTest4.xml
new file mode 100644
index 000000000..0ec384657
--- /dev/null
+++ b/test/resources/configurations/fetcher/FetcherConfTest4.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<fetchers>
+  <fetcher>
+    <classname>com.linkedin.drelephant.mapreduce.MapReduceFetcherHadoop2</classname>
+  </fetcher>
+</fetchers>
diff --git a/test/resources/configurations/heuristic/HeuristicConfTest1.xml b/test/resources/configurations/heuristic/HeuristicConfTest1.xml
new file mode 100644
index 000000000..ac7cf1f46
--- /dev/null
+++ b/test/resources/configurations/heuristic/HeuristicConfTest1.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<heuristics>
+
+  <!-- MAP-REDUCE HEURISTICS -->
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Data Skew</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpMapperDataSkew</viewname>
+    <params>
+      <num_tasks_severity>10, 50, 100, 200</num_tasks_severity>
+      <deviation_severity>2, 4, 8, 16</deviation_severity>
+      <files_severity>1/8, 1/4, 1/2, 1</files_severity>
+    </params>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper GC</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpGC</viewname>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Time</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperTimeHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpMapperTime</viewname>
+  </heuristic>
+
+</heuristics>
\ No newline at end of file
diff --git a/test/resources/configurations/heuristic/HeuristicConfTest2.xml b/test/resources/configurations/heuristic/HeuristicConfTest2.xml
new file mode 100644
index 000000000..94e2a56f9
--- /dev/null
+++ b/test/resources/configurations/heuristic/HeuristicConfTest2.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<heuristics>
+
+  <!-- MAP-REDUCE HEURISTICS -->
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Data Skew</heuristicname>
+    <class>com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic</class>
+    <viewname>views.html.help.mapreduce.helpMapperDataSkew</viewname>
+    <params>
+      <num_tasks_severity>10, 50, 100, 200</num_tasks_severity>
+      <deviation_severity>2, 4, 8, 16</deviation_severity>
+      <files_severity>1/8, 1/4, 1/2, 1</files_severity>
+    </params>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper GC</heuristicname>
+    <class>com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic</class>
+    <viewname>views.html.help.mapreduce.helpGC</viewname>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Time</heuristicname>
+    <class>com.linkedin.drelephant.mapreduce.heuristics.MapperTimeHeuristic</class>
+    <viewname>views.html.help.mapreduce.helpMapperTime</viewname>
+  </heuristic>
+
+</heuristics>
diff --git a/test/resources/configurations/heuristic/HeuristicConfTest3.xml b/test/resources/configurations/heuristic/HeuristicConfTest3.xml
new file mode 100644
index 000000000..a0199846c
--- /dev/null
+++ b/test/resources/configurations/heuristic/HeuristicConfTest3.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<heuristics>
+
+  <!-- MAP-REDUCE HEURISTICS -->
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristic>Mapper Data Skew</heuristic>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpMapperDataSkew</viewname>
+    <params>
+      <num_tasks_severity>10, 50, 100, 200</num_tasks_severity>
+      <deviation_severity>2, 4, 8, 16</deviation_severity>
+      <files_severity>1/8, 1/4, 1/2, 1</files_severity>
+    </params>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristic>Mapper GC</heuristic>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpGC</viewname>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristic>Mapper Time</heuristic>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperTimeHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpMapperTime</viewname>
+  </heuristic>
+
+</heuristics>
\ No newline at end of file
diff --git a/test/resources/configurations/heuristic/HeuristicConfTest4.xml b/test/resources/configurations/heuristic/HeuristicConfTest4.xml
new file mode 100644
index 000000000..0978d068b
--- /dev/null
+++ b/test/resources/configurations/heuristic/HeuristicConfTest4.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<heuristics>
+
+  <!-- MAP-REDUCE HEURISTICS -->
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Data Skew</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic</classname>
+    <view>views.html.help.mapreduce.helpMapperDataSkew</view>
+    <params>
+      <num_tasks_severity>10, 50, 100, 200</num_tasks_severity>
+      <deviation_severity>2, 4, 8, 16</deviation_severity>
+      <files_severity>1/8, 1/4, 1/2, 1</files_severity>
+    </params>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper GC</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic</classname>
+    <view>views.html.help.mapreduce.helpGC</view>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Time</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperTimeHeuristic</classname>
+    <view>views.html.help.mapreduce.helpMapperTime</view>
+  </heuristic>
+
+</heuristics>
\ No newline at end of file
diff --git a/test/resources/configurations/heuristic/HeuristicConfTest5.xml b/test/resources/configurations/heuristic/HeuristicConfTest5.xml
new file mode 100644
index 000000000..b48729b7d
--- /dev/null
+++ b/test/resources/configurations/heuristic/HeuristicConfTest5.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<heuristics>
+
+  <!-- MAP-REDUCE HEURISTICS -->
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Data Skew</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperDataSkewHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpMapperDataSkew</viewname>
+    <params>
+      <num_tasks_severity>10, 50, 100, 200</num_tasks_severity>
+      <deviation_severity>2, 4, 8, 16</deviation_severity>
+      <files_severity>1/8, 1/4, 1/2, 1</files_severity>
+    </params>
+  </heuristic>
+
+  <heuristic>
+    <heuristicname>Mapper GC</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperGCHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpGC</viewname>
+  </heuristic>
+
+  <heuristic>
+    <applicationtype>mapreduce</applicationtype>
+    <heuristicname>Mapper Time</heuristicname>
+    <classname>com.linkedin.drelephant.mapreduce.heuristics.MapperTimeHeuristic</classname>
+    <viewname>views.html.help.mapreduce.helpMapperTime</viewname>
+  </heuristic>
+
+</heuristics>
\ No newline at end of file
diff --git a/test/resources/configurations/jobtype/JobTypeConfTest1.xml b/test/resources/configurations/jobtype/JobTypeConfTest1.xml
new file mode 100644
index 000000000..d7e23bc91
--- /dev/null
+++ b/test/resources/configurations/jobtype/JobTypeConfTest1.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<jobTypes>
+  <jobType>
+    <name>Spark</name>
+    <applicationtype>spark</applicationtype>
+    <conf>spark.app.id</conf>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Pig</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>pig.script</conf>
+  </jobType>
+  <jobType>
+    <name>Hive</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>hive.mapred.mode</conf>
+  </jobType>
+  <jobType>
+    <name>Cascading</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>cascading.app.frameworks</conf>
+  </jobType>
+  <jobType>
+    <name>HadoopJava</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>mapred.child.java.opts</conf>
+    <isDefault/>
+  </jobType>
+</jobTypes>
diff --git a/test/resources/configurations/jobtype/JobTypeConfTest2.xml b/test/resources/configurations/jobtype/JobTypeConfTest2.xml
new file mode 100644
index 000000000..d53f35cf5
--- /dev/null
+++ b/test/resources/configurations/jobtype/JobTypeConfTest2.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+
+<jobTypes>
+  <jobType>
+    <name>Spark</name>
+    <applicationtype>spark</applicationtype>
+    <conf>spark.app.id</conf>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Pig</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>pig.script</conf>
+  </jobType>
+  <jobType>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>hive.mapred.mode</conf>
+  </jobType>
+  <jobType>
+    <name>Cascading</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>cascading.app.frameworks</conf>
+  </jobType>
+  <jobType>
+    <name>HadoopJava</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>mapred.child.java.opts</conf>
+    <isDefault/>
+  </jobType>
+</jobTypes>
\ No newline at end of file
diff --git a/test/resources/configurations/jobtype/JobTypeConfTest3.xml b/test/resources/configurations/jobtype/JobTypeConfTest3.xml
new file mode 100644
index 000000000..218673cf8
--- /dev/null
+++ b/test/resources/configurations/jobtype/JobTypeConfTest3.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+
+<jobTypes>
+  <jobType>
+    <name>Spark</name>
+    <applicationtype>spark</applicationtype>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Pig</name>
+    <applicationtype>mapreduce</applicationtype>
+  </jobType>
+  <jobType>
+    <name>Hive</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>hive.mapred.mode</conf>
+  </jobType>
+  <jobType>
+    <name>Cascading</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>cascading.app.frameworks</conf>
+  </jobType>
+  <jobType>
+    <name>HadoopJava</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>mapred.child.java.opts</conf>
+    <isDefault/>
+  </jobType>
+</jobTypes>
\ No newline at end of file
diff --git a/test/resources/configurations/jobtype/JobTypeConfTest4.xml b/test/resources/configurations/jobtype/JobTypeConfTest4.xml
new file mode 100644
index 000000000..80de75ffb
--- /dev/null
+++ b/test/resources/configurations/jobtype/JobTypeConfTest4.xml
@@ -0,0 +1,45 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+
+<jobTypes>
+  <jobType>
+    <name>Spark</name>
+    <applicationtype>spark</applicationtype>
+    <conf>spark.app.id</conf>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Pig</name>
+    <conf>pig.script</conf>
+  </jobType>
+  <jobType>
+    <name>Hive</name>
+    <conf>hive.mapred.mode</conf>
+  </jobType>
+  <jobType>
+    <name>Cascading</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>cascading.app.frameworks</conf>
+  </jobType>
+  <jobType>
+    <name>HadoopJava</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>mapred.child.java.opts</conf>
+    <isDefault/>
+  </jobType>
+</jobTypes>
\ No newline at end of file
diff --git a/test/resources/configurations/jobtype/JobTypeConfTest5.xml b/test/resources/configurations/jobtype/JobTypeConfTest5.xml
new file mode 100644
index 000000000..af5323513
--- /dev/null
+++ b/test/resources/configurations/jobtype/JobTypeConfTest5.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<jobTypes>
+  <jobType>
+    <name>Voldemort</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>mapred.reducer.class</conf>
+    <value>[(voldemort)</value>
+    <isDefault/>
+  </jobType>
+</jobTypes>
diff --git a/test/resources/configurations/jobtype/JobTypeConfTest6.xml b/test/resources/configurations/jobtype/JobTypeConfTest6.xml
new file mode 100644
index 000000000..272e2b87f
--- /dev/null
+++ b/test/resources/configurations/jobtype/JobTypeConfTest6.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Copyright 2015 LinkedIn Corp.
+
+  Licensed under the Apache License, Version 2.0 (the "License"); you may not
+  use this file except in compliance with the License. You may obtain a copy of
+  the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+  WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+  License for the specific language governing permissions and limitations under
+  the License.
+-->
+
+<jobTypes>
+  <jobType>
+    <name>Spark</name>
+    <applicationtype>spark</applicationtype>
+    <conf>spark.app.id</conf>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Pig</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>pig.script</conf>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Hive</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>hive.mapred.mode</conf>
+    <isDefault/>
+  </jobType>
+  <jobType>
+    <name>Cascading</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>cascading.app.frameworks</conf>
+  </jobType>
+  <jobType>
+    <name>HadoopJava</name>
+    <applicationtype>mapreduce</applicationtype>
+    <conf>mapred.child.java.opts</conf>
+    <isDefault/>
+  </jobType>
+</jobTypes>