This is an automated email from the ASF dual-hosted git repository.

srdo pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/storm.git


The following commit(s) were added to refs/heads/master by this push:
     new 918e894  STORM-3461: examples-hdfs: fix all checkstyle warnings
     new 5b1f70d  Merge pull request #3079 from krichter722/checkstyle-hdfs-examples
918e894 is described below

commit 918e894da7ac528cf2f350babe3878c36107b0ba
Author: Karl-Philipp Richter <krich...@posteo.de>
AuthorDate: Sat Jul 6 18:09:45 2019 +0200

    STORM-3461: examples-hdfs: fix all checkstyle warnings
---
 examples/storm-hdfs-examples/pom.xml               |  2 +-
 .../apache/storm/hdfs/bolt/HdfsFileTopology.java   |  1 +
 .../storm/hdfs/bolt/SequenceFileTopology.java      |  1 +
 .../apache/storm/hdfs/spout/HdfsSpoutTopology.java | 33 +++++++++++-----------
 .../storm/hdfs/trident/TridentFileTopology.java    | 10 +++----
 .../hdfs/trident/TridentSequenceTopology.java      | 10 +++----
 6 files changed, 29 insertions(+), 28 deletions(-)

diff --git a/examples/storm-hdfs-examples/pom.xml b/examples/storm-hdfs-examples/pom.xml
index f1e11da..697a22c 100644
--- a/examples/storm-hdfs-examples/pom.xml
+++ b/examples/storm-hdfs-examples/pom.xml
@@ -97,7 +97,7 @@
                 <artifactId>maven-checkstyle-plugin</artifactId>
                 <!--Note - the version would be inherited-->
                 <configuration>
-                    <maxAllowedViolations>29</maxAllowedViolations>
+                    <maxAllowedViolations>0</maxAllowedViolations>
                 </configuration>
             </plugin>
             <plugin>
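
Note on the hunk above: dropping maxAllowedViolations from 29 to 0 means any new checkstyle warning in this module now fails the build instead of being tolerated. Assuming the check goal is bound through the inherited plugin configuration (as in the other example modules), this can be verified for just this module with:

    mvn checkstyle:check -pl examples/storm-hdfs-examples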
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
index e0e3057..01a446c 100644
--- a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/HdfsFileTopology.java
@@ -101,6 +101,7 @@ public class HdfsFileTopology {
         try {
             Thread.sleep(seconds * 1000);
         } catch (InterruptedException e) {
+            //ignore
         }
     }
 
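The added comment satisfies checkstyle's empty-block rule, which accepts an otherwise empty catch as long as it contains an explanatory comment. A stricter variant, not what this patch does, is to re-assert the interrupt flag so callers can still observe the interruption; a minimal sketch (the wrapper's name and signature are assumed here for illustration):

    private static void sleep(long seconds) {
        try {
            Thread.sleep(seconds * 1000);
        } catch (InterruptedException e) {
            // restore the flag instead of silently swallowing the interrupt
            Thread.currentThread().interrupt();
        }
    }

The identical sleep helper in SequenceFileTopology below gets the same comment.
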
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
index 2a58249..90e0aea 100644
--- a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/bolt/SequenceFileTopology.java
@@ -104,6 +104,7 @@ public class SequenceFileTopology {
         try {
             Thread.sleep(seconds * 1000);
         } catch (InterruptedException e) {
+            //ignore
         }
     }
 
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java
index e3282d8..890b7ab 100644
--- a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/spout/HdfsSpoutTopology.java
@@ -63,25 +63,13 @@ public class HdfsSpoutTopology {
         }
 
         // 1 - parse cmd line args
-        String topologyName = args[0];
         String hdfsUri = args[1];
         String fileFormat = args[2];
         String sourceDir = args[3];
         String archiveDir = args[4];
         String badDir = args[5];
-        int spoutNum = Integer.parseInt(args[6]);
-
-        // 2 - create and configure spout and bolt
-        ConstBolt bolt = new ConstBolt();
-
-        HdfsSpout spout = new HdfsSpout().withOutputFields(TextFileReader.defaultFields)
-                                         .setReaderType(fileFormat)
-                                         .setHdfsUri(hdfsUri)
-                                         .setSourceDir(sourceDir)
-                                         .setArchiveDir(archiveDir)
-                                         .setBadFilesDir(badDir);
 
-        // 3 - Create and configure topology
+        // 2 - Create and configure topology
         Config conf = new Config();
         conf.setNumWorkers(1);
         conf.setNumAckers(1);
@@ -90,15 +78,24 @@ public class HdfsSpoutTopology {
         conf.registerMetricsConsumer(LoggingMetricsConsumer.class);
 
         TopologyBuilder builder = new TopologyBuilder();
+        HdfsSpout spout = new HdfsSpout().withOutputFields(TextFileReader.defaultFields)
+                .setReaderType(fileFormat)
+                .setHdfsUri(hdfsUri)
+                .setSourceDir(sourceDir)
+                .setArchiveDir(archiveDir)
+                .setBadFilesDir(badDir);
+        int spoutNum = Integer.parseInt(args[6]);
         builder.setSpout(SPOUT_ID, spout, spoutNum);
+        ConstBolt bolt = new ConstBolt();
         builder.setBolt(BOLT_ID, bolt, 1).shuffleGrouping(SPOUT_ID);
 
-        // 4 - submit topology, wait for a few min and terminate it
+        // 3 - submit topology, wait for a few min and terminate it
         Map<String, Object> clusterConf = Utils.readStormConfig();
+        String topologyName = args[0];
         StormSubmitter.submitTopologyWithProgressBar(topologyName, conf, builder.createTopology());
         Nimbus.Iface client = NimbusClient.getConfiguredClient(clusterConf).getClient();
 
-        // 5 - Print metrics every 30 sec, kill topology after 20 min
+        // 4 - Print metrics every 30 sec, kill topology after 20 min
         for (int i = 0; i < 40; i++) {
             Thread.sleep(30 * 1000);
             printMetrics(client, topologyName);
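
The reshuffling in the two hunks above moves each local next to its first use: spout and spoutNum now sit beside builder.setSpout, bolt beside builder.setBolt, and topologyName beside the submit call, with the numbered step comments renumbered to match. This is the usual remedy for checkstyle's declaration-to-usage distance rule (presumably VariableDeclarationUsageDistance in the ruleset used here); the pattern in miniature:

    // flagged: declared long before its only use
    int spoutNum = Integer.parseInt(args[6]);
    // ... unrelated configuration in between ...
    builder.setSpout(SPOUT_ID, spout, spoutNum);

    // preferred: declared immediately before use
    int spoutNum = Integer.parseInt(args[6]);
    builder.setSpout(SPOUT_ID, spout, spoutNum);
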
@@ -149,8 +146,10 @@ public class HdfsSpoutTopology {
             }
         }
         double avgLatency = weightedAvgTotal / acked;
-        System.out.println("uptime: " + uptime + " acked: " + acked + " 
avgLatency: " + avgLatency + " acked/sec: " +
-                           (((double) acked) / uptime + " failed: " + failed));
+        System.out.println("uptime: " + uptime
+                + " acked: " + acked
+                + " avgLatency: " + avgLatency
+                + " acked/sec: " + (((double) acked) / uptime + " failed: " + 
failed));
     }
 
     public static class ConstBolt extends BaseRichBolt {
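
The println change is layout only: each concatenation operator moves to the head of its continuation line (the placement an operator-wrap style rule expects) and the statement now fits the line-length limit. The final segment keeps its original grouping, (((double) acked) / uptime + " failed: " + failed); since division binds tighter than string concatenation, the printed text is unchanged. A possible follow-up, not part of this patch, would flatten that grouping for readability:

    System.out.println("uptime: " + uptime
            + " acked: " + acked
            + " avgLatency: " + avgLatency
            + " acked/sec: " + ((double) acked) / uptime  // division still happens before concatenation
            + " failed: " + failed);
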
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
index e8bf490..b722497 100644
--- a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentFileTopology.java
@@ -41,11 +41,11 @@ import org.yaml.snakeyaml.Yaml;
 public class TridentFileTopology {
 
     public static StormTopology buildTopology(String hdfsUrl) {
-        FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1l),
-                                                    new Values("the man went to the store and bought some candy", 2l),
-                                                    new Values("four score and seven years ago", 3l),
-                                                    new Values("how many apples can you eat", 4l),
-                                                    new Values("to be or not to be the person", 5l));
+        FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1L),
+                                                    new Values("the man went to the store and bought some candy", 2L),
+                                                    new Values("four score and seven years ago", 3L),
+                                                    new Values("how many apples can you eat", 4L),
+                                                    new Values("to be or not to be the person", 5L));
         spout.setCycle(true);
 
         TridentTopology topology = new TridentTopology();
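
The only change in this file is the long-literal suffix: 1l through 5l become 1L through 5L. Checkstyle's UpperEll check flags the lowercase form because a trailing l is easily misread as the digit 1; the values are identical. In miniature:

    long bad = 1l;  // compiles, but the suffix reads like the digit one
    long good = 1L; // same value, unambiguous

The same substitution is applied in TridentSequenceTopology below.
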
diff --git a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
index 15612bd..74d1d5c 100644
--- a/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
+++ b/examples/storm-hdfs-examples/src/main/java/org/apache/storm/hdfs/trident/TridentSequenceTopology.java
@@ -41,11 +41,11 @@ import org.yaml.snakeyaml.Yaml;
 public class TridentSequenceTopology {
 
     public static StormTopology buildTopology(String hdfsUrl) {
-        FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1l),
-                                                    new Values("the man went to the store and bought some candy", 2l),
-                                                    new Values("four score and seven years ago", 3l),
-                                                    new Values("how many apples can you eat", 4l),
-                                                    new Values("to be or not to be the person", 5l));
+        FixedBatchSpout spout = new FixedBatchSpout(new Fields("sentence", "key"), 1000, new Values("the cow jumped over the moon", 1L),
+                                                    new Values("the man went to the store and bought some candy", 2L),
+                                                    new Values("four score and seven years ago", 3L),
+                                                    new Values("how many apples can you eat", 4L),
+                                                    new Values("to be or not to be the person", 5L));
         spout.setCycle(true);
 
         TridentTopology topology = new TridentTopology();
