[ 
https://issues.apache.org/jira/browse/NUTCH-2442?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16240954#comment-16240954
 ] 

ASF GitHub Bot commented on NUTCH-2442:
---------------------------------------

sebastian-nagel closed pull request #239: NUTCH-2442 Injector to stop if job 
fails to avoid loss of CrawlDb
URL: https://github.com/apache/nutch/pull/239
 
 
   

This is a PR merged from a forked repository.
Because GitHub hides the original diff of a foreign pull request on merge,
it is displayed below for the sake of provenance:

diff --git a/src/java/org/apache/nutch/crawl/Injector.java 
b/src/java/org/apache/nutch/crawl/Injector.java
index 5f5fd15ff..d872dbff5 100644
--- a/src/java/org/apache/nutch/crawl/Injector.java
+++ b/src/java/org/apache/nutch/crawl/Injector.java
@@ -414,7 +414,16 @@ public void inject(Path crawlDb, Path urlDir, boolean 
overwrite,
 
     try {
       // run the job
-      job.waitForCompletion(true);
+      boolean success = job.waitForCompletion(true);
+      if (!success) {
+        String message = "Injector job did not succeed, job status: "
+            + job.getStatus().getState() + ", reason: "
+            + job.getStatus().getFailureInfo();
+        LOG.error(message);
+        cleanupAfterFailure(tempCrawlDb, lock, fs);
+        // throw exception so that calling routine can exit with error
+        throw new RuntimeException(message);
+      }
 
       // save output and perform cleanup
       CrawlDb.install(job, crawlDb);
@@ -452,11 +461,21 @@ public void inject(Path crawlDb, Path urlDir, boolean 
overwrite,
         LOG.info("Injector: finished at " + sdf.format(end) + ", elapsed: "
             + TimingUtil.elapsedTime(start, end));
       }
-    } catch (IOException e) {
+    } catch (IOException | InterruptedException | ClassNotFoundException e) {
+      LOG.error("Injector job failed", e);
+      cleanupAfterFailure(tempCrawlDb, lock, fs);
+      throw e;
+    }
+  }
+
+  public void cleanupAfterFailure(Path tempCrawlDb, Path lock, FileSystem fs)
+      throws IOException {
+    try {
       if (fs.exists(tempCrawlDb)) {
         fs.delete(tempCrawlDb, true);
       }
-      LockUtil.removeLockFile(conf, lock);
+      LockUtil.removeLockFile(fs, lock);
+    } catch (IOException e) {
       throw e;
     }
   }
diff --git a/src/java/org/apache/nutch/hostdb/ReadHostDb.java 
b/src/java/org/apache/nutch/hostdb/ReadHostDb.java
index 28a7eb709..eac3bf645 100644
--- a/src/java/org/apache/nutch/hostdb/ReadHostDb.java
+++ b/src/java/org/apache/nutch/hostdb/ReadHostDb.java
@@ -202,8 +202,17 @@ private void readHostDb(Path hostDb, Path output, boolean 
dumpHomepages, boolean
     job.setNumReduceTasks(0);
 
     try {
-      job.waitForCompletion(true);
-    } catch (Exception e) {
+      boolean success = job.waitForCompletion(true);
+      if (!success) {
+        String message = "ReadHostDb job did not succeed, job status: "
+            + job.getStatus().getState() + ", reason: "
+            + job.getStatus().getFailureInfo();
+        LOG.error(message);
+        // throw exception so that calling routine can exit with error
+        throw new RuntimeException(message);
+      }
+    } catch (IOException | InterruptedException | ClassNotFoundException e) {
+      LOG.error("ReadHostDb job failed", e);
       throw e;
     }
 
diff --git a/src/java/org/apache/nutch/util/CrawlCompletionStats.java 
b/src/java/org/apache/nutch/util/CrawlCompletionStats.java
index 4920fbf32..116c3113d 100644
--- a/src/java/org/apache/nutch/util/CrawlCompletionStats.java
+++ b/src/java/org/apache/nutch/util/CrawlCompletionStats.java
@@ -171,8 +171,17 @@ public int run(String[] args) throws Exception {
     job.setNumReduceTasks(numOfReducers);
 
     try {
-      job.waitForCompletion(true);
-    } catch (Exception e) {
+      boolean success = job.waitForCompletion(true);
+      if (!success) {
+        String message = jobName + " job did not succeed, job status: "
+            + job.getStatus().getState() + ", reason: "
+            + job.getStatus().getFailureInfo();
+        LOG.error(message);
+        // throw exception so that calling routine can exit with error
+        throw new RuntimeException(message);
+      }
+    } catch (IOException | InterruptedException | ClassNotFoundException e) {
+      LOG.error(jobName + " job failed");
       throw e;
     }
 
diff --git a/src/java/org/apache/nutch/util/ProtocolStatusStatistics.java 
b/src/java/org/apache/nutch/util/ProtocolStatusStatistics.java
index a18860634..7e241ffdf 100644
--- a/src/java/org/apache/nutch/util/ProtocolStatusStatistics.java
+++ b/src/java/org/apache/nutch/util/ProtocolStatusStatistics.java
@@ -122,8 +122,17 @@ public int run(String[] args) throws Exception {
     job.setNumReduceTasks(numOfReducers);
 
     try {
-      job.waitForCompletion(true);
-    } catch (Exception e) {
+      boolean success = job.waitForCompletion(true);
+      if (!success) {
+        String message = jobName + " job did not succeed, job status: "
+            + job.getStatus().getState() + ", reason: "
+            + job.getStatus().getFailureInfo();
+        LOG.error(message);
+        // throw exception so that calling routine can exit with error
+        throw new RuntimeException(message);
+      }
+    } catch (IOException | InterruptedException | ClassNotFoundException e) {
+      LOG.error(jobName + " job failed", e);
       throw e;
     }
 
diff --git a/src/java/org/apache/nutch/util/SitemapProcessor.java 
b/src/java/org/apache/nutch/util/SitemapProcessor.java
index 8b1d8cccf..5150d61c3 100644
--- a/src/java/org/apache/nutch/util/SitemapProcessor.java
+++ b/src/java/org/apache/nutch/util/SitemapProcessor.java
@@ -358,7 +358,16 @@ public void sitemap(Path crawldb, Path hostdb, Path 
sitemapUrlDir, boolean stric
     job.setReducerClass(SitemapReducer.class);
 
     try {
-      job.waitForCompletion(true);
+      boolean success = job.waitForCompletion(true);
+      if (!success) {
+        String message = "SitemapProcessor_" + crawldb.toString()
+            + " job did not succeed, job status: " + job.getStatus().getState()
+            + ", reason: " + job.getStatus().getFailureInfo();
+        LOG.error(message);
+        cleanupAfterFailure(tempCrawlDb, lock, fs);
+        // throw exception so that calling routine can exit with error
+        throw new RuntimeException(message);
+      }
 
       boolean preserveBackup = conf.getBoolean("db.preserve.backup", true);
       if (!preserveBackup && fs.exists(old))
@@ -385,11 +394,21 @@ public void sitemap(Path crawldb, Path hostdb, Path 
sitemapUrlDir, boolean stric
         long end = System.currentTimeMillis();
         LOG.info("SitemapProcessor: Finished at {}, elapsed: {}", 
sdf.format(end), TimingUtil.elapsedTime(start, end));
       }
-    } catch (Exception e) {
-      if (fs.exists(tempCrawlDb))
-        fs.delete(tempCrawlDb, true);
+    } catch (IOException | InterruptedException | ClassNotFoundException e) {
+      LOG.error("SitemapProcessor_" + crawldb.toString(), e);
+      cleanupAfterFailure(tempCrawlDb, lock, fs);
+      throw e;
+    }
+  }
 
+  public void cleanupAfterFailure(Path tempCrawlDb, Path lock, FileSystem fs)
+      throws IOException {
+    try {
+      if (fs.exists(tempCrawlDb)) {
+        fs.delete(tempCrawlDb, true);
+      }
       LockUtil.removeLockFile(fs, lock);
+    } catch (IOException e) {
       throw e;
     }
   }
diff --git a/src/java/org/apache/nutch/util/domain/DomainStatistics.java 
b/src/java/org/apache/nutch/util/domain/DomainStatistics.java
index af1c2660e..1eec59e85 100644
--- a/src/java/org/apache/nutch/util/domain/DomainStatistics.java
+++ b/src/java/org/apache/nutch/util/domain/DomainStatistics.java
@@ -140,8 +140,17 @@ public int run(String[] args) throws Exception {
     job.setNumReduceTasks(numOfReducers);
 
     try {
-      job.waitForCompletion(true);
-    } catch (Exception e) {
+      boolean success = job.waitForCompletion(true);
+      if (!success) {
+        String message = "Injector job did not succeed, job status: "
+            + job.getStatus().getState() + ", reason: "
+            + job.getStatus().getFailureInfo();
+        LOG.error(message);
+        // throw exception so that calling routine can exit with error
+        throw new RuntimeException(message);
+      }
+    } catch (IOException | InterruptedException | ClassNotFoundException e) {
+      LOG.error(jobName + " job failed", e);
       throw e;
     }
 


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


> Injector to stop if job fails to avoid loss of CrawlDb
> ------------------------------------------------------
>
>                 Key: NUTCH-2442
>                 URL: https://issues.apache.org/jira/browse/NUTCH-2442
>             Project: Nutch
>          Issue Type: Bug
>          Components: injector
>    Affects Versions: 1.13
>            Reporter: Sebastian Nagel
>            Priority: Critical
>             Fix For: 1.14
>
>
> Injector does not check whether the MapReduce job is successful. Even if the 
> job fails, it still:
> - installs the CrawlDb
> -- move current/ to old/
> -- replace current/ with an empty or potentially incomplete version
> - exits with code 0 so that scripts running the crawl workflow cannot detect 
> the failure -- if Injector is run a second time the CrawlDb is lost (both 
> current/ and old/ are empty or corrupted)



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)

Reply via email to