DRILL-212: Resource leak in org.apache.drill.exec.store.parquet.ParquetRecordReader.setup()


Project: http://git-wip-us.apache.org/repos/asf/incubator-drill/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-drill/commit/ad992eba
Tree: http://git-wip-us.apache.org/repos/asf/incubator-drill/tree/ad992eba
Diff: http://git-wip-us.apache.org/repos/asf/incubator-drill/diff/ad992eba

Branch: refs/heads/master
Commit: ad992eba7468012323ff07b8369751a7e2774c21
Parents: b34b89a
Author: Aditya Kishore <[email protected]>
Authored: Wed Sep 4 18:50:17 2013 -0700
Committer: Jacques Nadeau <[email protected]>
Committed: Thu Sep 5 16:48:50 2013 -0700

----------------------------------------------------------------------
 .../apache/drill/exec/store/parquet/ParquetRecordReader.java    | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-drill/blob/ad992eba/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordReader.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordReader.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordReader.java
index 329fa8f..b6b3d41 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordReader.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/parquet/ParquetRecordReader.java
@@ -256,9 +256,9 @@ public class ParquetRecordReader implements RecordReader {
     long totalBytesWritten = 0;
     int validBytesInCurrentBuffer;
     byte[] buffer = new byte[bufferSize];
-    try {
+    
+    try (FSDataInputStream inputStream = fileSystem.open(hadoopPath)) {
       bufferWithAllData = allocator.buffer(totalByteLength);
-      FSDataInputStream inputStream = fileSystem.open(hadoopPath);
       inputStream.seek(start);
       while (totalBytesWritten < totalByteLength){
        validBytesInCurrentBuffer = (int) Math.min(bufferSize, totalByteLength - totalBytesWritten);
@@ -266,7 +266,6 @@ public class ParquetRecordReader implements RecordReader {
        bufferWithAllData.writeBytes(buffer, 0 , (int) validBytesInCurrentBuffer);
         totalBytesWritten += validBytesInCurrentBuffer;
       }
-
     } catch (IOException e) {
      throw new ExecutionSetupException("Error opening or reading metatdata for parquet file at location: " + hadoopPath.getName());
     }

Reply via email to