Apache-Phoenix | master | HBase 2.4 | Build #612 SUCCESS

2023-12-14 Thread Apache Jenkins Server

master branch  HBase 2.4  build #612 status SUCCESS
Build #612 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/master/612/


Apache-Phoenix | master | HBase 2.5 | Build #612 FAILURE

2023-12-14 Thread Apache Jenkins Server

master branch  HBase 2.5  build #612 status FAILURE
Build #612 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/master/612/


Apache-Phoenix | 5.1 | HBase 2.4 | Build #297 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.4  build #297 status SUCCESS
Build #297 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/297/


Apache-Phoenix | 5.1 | HBase 2.3 | Build #297 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.3  build #297 status SUCCESS
Build #297 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/297/


Apache-Phoenix | 5.1 | HBase 2.5 | Build #297 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.5  build #297 status SUCCESS
Build #297 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/297/


(phoenix) branch 5.1 updated: PHOENIX-6721 CSV bulkload tool fails with FileNotFoundException if --output points to the S3 location

2023-12-14 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new 6fd287a08e PHOENIX-6721 CSV bulkload tool fails with FileNotFoundException if --output points to the S3 location
6fd287a08e is described below

commit 6fd287a08e55a2bd06c15a543c4c211b5e815986
Author: Sergey Soldatov 
AuthorDate: Tue May 31 13:37:20 2022 -0700

PHOENIX-6721 CSV bulkload tool fails with FileNotFoundException if --output points to the S3 location

Co-authored-by: Istvan Toth 
---
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  | 25 +++---
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 3a9071e123..a027f00400 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -67,10 +67,11 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.phoenix.compat.hbase.CompatUtil;
 import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
@@ -113,7 +114,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     @Override
     public RecordWriter<TableRowkeyPair, Cell> getRecordWriter(TaskAttemptContext context)
             throws IOException, InterruptedException {
-        return createRecordWriter(context);
+        return createRecordWriter(context, this.getOutputCommitter(context));
     }
 
     /**
@@ -122,11 +123,11 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
-    static <V extends Cell> RecordWriter<TableRowkeyPair, V> createRecordWriter(final TaskAttemptContext context)
+    static <V extends Cell> RecordWriter<TableRowkeyPair, V> createRecordWriter(
+            final TaskAttemptContext context, final OutputCommitter committer)
             throws IOException {
         // Get the path of the temporary output file
-        final Path outputPath = FileOutputFormat.getOutputPath(context);
-        final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
+        final Path outputdir = ((PathOutputCommitter) committer).getWorkPath();
         final Configuration conf = context.getConfiguration();
         final FileSystem fs = outputdir.getFileSystem(conf);
 
@@ -336,7 +337,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf,final String tableName) {
         Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
         Map<String, String> tableConfigs = getTableConfigurations(conf, tableName);
-        if(tableConfigs == null) {
+        if (tableConfigs == null) {
             return compressionMap;
         }
         Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,COMPRESSION_FAMILIES_CONF_KEY);
@@ -355,7 +356,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<String, String> getTableConfigurations(Configuration conf, final String tableName) {
         String tableDefn = conf.get(tableName);
-        if(StringUtils.isEmpty(tableDefn)) {
+        if (StringUtils.isEmpty(tableDefn)) {
             return null;
         }
         TargetTableRef table = TargetTableRefFunctions.FROM_JSON.apply(tableDefn);
@@ -374,7 +375,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf,final String tableName) {
         Map<byte[], BloomType> bloomTypeMap = new TreeMap<byte[], BloomType>(Bytes.BYTES_COMPARATOR);
         Map<String, String> tableConfigs = getTableConfigurations(conf, tableName);
-        if(tableConfigs == null) {
+        if (tableConfigs == null) {
             return bloomTypeMap;
         }
         Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,BLOOM_TYPE_FAMILIES_CONF_KEY);
@@ -396,7 +397,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf,final String tableName) {
         Map<byte[], Integer> blockSizeMap = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
         Map<String, String> tableConfigs = getTableConfigurations(conf, tableName);
-        if(tableConfigs == null) {
+        if (tableConfigs == null) {
             return blockSizeMap;
         }
         Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,BLOCK_SIZE_FAMILIES_CONF_KEY);
@@ -420,7 +421,7 @@ public class MultiHfileOutputFormat extends FileOut
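
The gist of the change, for readers skimming the diff: the old code built a
fresh FileOutputCommitter from the job output path and used its work path,
which presumes an HDFS-style "_temporary" directory layout that an S3A
committer never creates, hence the FileNotFoundException. The new code asks
the committer that MapReduce actually configured for the task. A minimal
sketch of the pattern, as a hypothetical helper rather than Phoenix code:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;

final class WorkPathResolver {

    private WorkPathResolver() {
    }

    // Resolve the task working directory from whatever committer the job
    // really uses. On Hadoop 3.x both FileOutputCommitter and the S3A
    // committers extend PathOutputCommitter, so this covers HDFS and S3
    // destinations alike.
    static Path workPath(OutputCommitter committer) throws IOException {
        if (committer instanceof PathOutputCommitter) {
            return ((PathOutputCommitter) committer).getWorkPath();
        }
        throw new IOException("Committer " + committer.getClass().getName()
                + " does not expose a work path");
    }
}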

(phoenix) branch master updated: PHOENIX-6721 CSV bulkload tool fails with FileNotFoundException if --output points to the S3 location

2023-12-14 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 0ed534f4cf PHOENIX-6721 CSV bulkload tool fails with FileNotFoundException if --output points to the S3 location
0ed534f4cf is described below

commit 0ed534f4cfefb059f5c8633f0db9c4a188ba97df
Author: Sergey Soldatov 
AuthorDate: Tue May 31 13:37:20 2022 -0700

PHOENIX-6721 CSV bulkload tool fails with FileNotFoundException if --output points to the S3 location

Co-authored-by: Istvan Toth 
---
 .../phoenix/mapreduce/MultiHfileOutputFormat.java  | 25 +++---
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index 3b2d4c47bf..b792958b7a 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -68,10 +68,11 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.PathOutputCommitter;
 import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner;
 import org.apache.phoenix.compat.hbase.CompatUtil;
 import org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair;
@@ -114,7 +115,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     @Override
     public RecordWriter<TableRowkeyPair, Cell> getRecordWriter(TaskAttemptContext context)
             throws IOException, InterruptedException {
-        return createRecordWriter(context);
+        return createRecordWriter(context, this.getOutputCommitter(context));
     }
 
     /**
@@ -123,11 +124,11 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
-    static <V extends Cell> RecordWriter<TableRowkeyPair, V> createRecordWriter(final TaskAttemptContext context)
+    static <V extends Cell> RecordWriter<TableRowkeyPair, V> createRecordWriter(
+            final TaskAttemptContext context, final OutputCommitter committer)
             throws IOException {
         // Get the path of the temporary output file
-        final Path outputPath = FileOutputFormat.getOutputPath(context);
-        final Path outputdir = new FileOutputCommitter(outputPath, context).getWorkPath();
+        final Path outputdir = ((PathOutputCommitter) committer).getWorkPath();
         final Configuration conf = context.getConfiguration();
         final FileSystem fs = outputdir.getFileSystem(conf);
 
@@ -336,7 +337,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf,final String tableName) {
         Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
         Map<String, String> tableConfigs = getTableConfigurations(conf, tableName);
-        if(tableConfigs == null) {
+        if (tableConfigs == null) {
             return compressionMap;
         }
         Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,COMPRESSION_FAMILIES_CONF_KEY);
@@ -355,7 +356,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<String, String> getTableConfigurations(Configuration conf, final String tableName) {
         String tableDefn = conf.get(tableName);
-        if(StringUtils.isEmpty(tableDefn)) {
+        if (StringUtils.isEmpty(tableDefn)) {
             return null;
         }
         TargetTableRef table = TargetTableRefFunctions.FROM_JSON.apply(tableDefn);
@@ -374,7 +375,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf,final String tableName) {
         Map<byte[], BloomType> bloomTypeMap = new TreeMap<byte[], BloomType>(Bytes.BYTES_COMPARATOR);
         Map<String, String> tableConfigs = getTableConfigurations(conf, tableName);
-        if(tableConfigs == null) {
+        if (tableConfigs == null) {
             return bloomTypeMap;
         }
         Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,BLOOM_TYPE_FAMILIES_CONF_KEY);
@@ -396,7 +397,7 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {
     private static Map<byte[], Integer> createFamilyBlockSizeMap(Configuration conf,final String tableName) {
         Map<byte[], Integer> blockSizeMap = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR);
         Map<String, String> tableConfigs = getTableConfigurations(conf, tableName);
-        if(tableConfigs == null) {
+        if (tableConfigs == null) {
             return blockSizeMap;
         }
         Map<byte[], String> stringMap = createFamilyConfValueMap(tableConfigs,BLOCK_SIZE_FAMILIES_CONF_KEY);
@@ -420,7 +421,7 @@ public class Multi
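
One operational note on the same patch as applied to master: the cast to
PathOutputCommitter only succeeds if the job hands out a path-based committer
for the s3a:// destination in the first place. With hadoop-aws that is
controlled by the committer factory settings; a hedged sketch follows, using
configuration keys from the hadoop-aws committer documentation (the chosen
committer name is an assumption, pick whichever fits your deployment):

import org.apache.hadoop.conf.Configuration;

public class S3ACommitterConf {

    // Bind the S3A committer factory for s3a:// output paths and select a
    // committer implementation; "directory", "partitioned" and "magic" are
    // the documented choices.
    public static Configuration configure(Configuration conf) {
        conf.set("mapreduce.outputcommitter.factory.scheme.s3a",
                "org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory");
        conf.set("fs.s3a.committer.name", "directory");
        return conf;
    }
}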

Apache-Phoenix | 5.1 | HBase 2.2 | Build #296 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.2  build #296 status SUCCESS
Build #296 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/296/


Apache-Phoenix | 5.1 | HBase 2.1 | Build #296 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.1  build #296 status SUCCESS
Build #296 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/296/


Apache-Phoenix | master | HBase 2.5 | Build #611 FAILURE

2023-12-14 Thread Apache Jenkins Server

master branch  HBase 2.5  build #611 status FAILURE
Build #611 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/master/611/


(phoenix-omid) branch master updated: OMID-250:Remove duplicate declarations of hadoop-hdfs-client dependency in pom.xml (#150)

2023-12-14 Thread rajeshbabu
This is an automated email from the ASF dual-hosted git repository.

rajeshbabu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix-omid.git


The following commit(s) were added to refs/heads/master by this push:
 new 134d2af4 OMID-250:Remove duplicate declarations of hadoop-hdfs-client dependency in pom.xml (#150)
134d2af4 is described below

commit 134d2af4cd6c79e3f099879cc709eacf383c5def
Author: Anchal Kejriwal <55595137+anchal...@users.noreply.github.com>
AuthorDate: Thu Dec 14 22:07:29 2023 +0530

OMID-250:Remove duplicate declarations of hadoop-hdfs-client dependency in pom.xml (#150)
---
 pom.xml | 6 --
 1 file changed, 6 deletions(-)

diff --git a/pom.xml b/pom.xml
index c5bd0467..5b7d1872 100644
--- a/pom.xml
+++ b/pom.xml
@@ -818,12 +818,6 @@
         <version>${hadoop.version}</version>
       </dependency>
 
-      <dependency>
-        <groupId>org.apache.hadoop</groupId>
-        <artifactId>hadoop-hdfs-client</artifactId>
-        <version>${hadoop.version}</version>
-      </dependency>
-
       <dependency>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-hdfs</artifactId>



Apache-Phoenix | master | HBase 2.4 | Build #611 FAILURE

2023-12-14 Thread Apache Jenkins Server

master branch  HBase 2.4  build #611 status FAILURE
Build #611 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/master/611/


Apache-Phoenix | 5.1 | HBase 2.3 | Build #296 FAILURE

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.3  build #296 status FAILURE
Build #296 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/296/


Apache-Phoenix | 5.1 | HBase 2.5 | Build #296 FAILURE

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.5  build #296 status FAILURE
Build #296 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/296/


Apache-Phoenix | 5.1 | HBase 2.4 | Build #296 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.4  build #296 status SUCCESS
Build #296 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/296/


(phoenix) branch 5.1 updated: PHOENIX-7153 Fix Warnings Flagged as Errors by Eclipse

2023-12-14 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
 new dae7d64559 PHOENIX-7153 Fix Warnings Flagged as Errors by Eclipse
dae7d64559 is described below

commit dae7d6455983f47bf5676f9dad4656ce67babf45
Author: Istvan Toth 
AuthorDate: Thu Dec 14 08:35:55 2023 +0100

PHOENIX-7153 Fix Warnings Flagged as Errors by Eclipse
---
 .../org/apache/phoenix/expression/RowValueConstructorExpression.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index f92d1e22cb..e62e8599b6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -121,7 +121,7 @@ public class RowValueConstructorExpression extends BaseCompoundExpression {
     public void write(DataOutput output) throws IOException {
         super.write(output);
         byte[] b = extraFields.toByteArray();
-        output.writeByte((int)(b.length > 0 ? b[0] & 0xff  : 0));
+        output.writeByte((b.length > 0 ? b[0] & 0xff  : 0));
     }
 
     private void init() {
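
Why Eclipse flags the removed cast: the & operator applies binary numeric
promotion, so b[0] & 0xff is already an int in the range 0..255 before any
cast, making the explicit (int) dead weight. A tiny standalone illustration,
not Phoenix code:

public class ByteMaskDemo {
    public static void main(String[] args) {
        byte b = (byte) 0xF0;       // -16 as a signed byte
        int masked = b & 0xff;      // & promotes the byte operand to int
        System.out.println(masked); // prints 240; no (int) cast needed
    }
}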



(phoenix) branch master updated: PHOENIX-7153 Fix Warnings Flagged as Errors by Eclipse

2023-12-14 Thread stoty
This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/master by this push:
 new 75e085b93c PHOENIX-7153 Fix Warnings Flagged as Errors by Eclipse
75e085b93c is described below

commit 75e085b93c94c8bfff8141dacd9185f09d13e5c3
Author: Istvan Toth 
AuthorDate: Thu Dec 14 08:35:55 2023 +0100

PHOENIX-7153 Fix Warnings Flagged as Errors by Eclipse
---
 .../org/apache/phoenix/expression/RowValueConstructorExpression.java | 2 +-
 .../apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java   | 2 +-
 .../java/org/apache/phoenix/mapreduce/transform/TransformTool.java   | 5 ++---
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
index f92d1e22cb..e62e8599b6 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/expression/RowValueConstructorExpression.java
@@ -121,7 +121,7 @@ public class RowValueConstructorExpression extends BaseCompoundExpression {
     public void write(DataOutput output) throws IOException {
         super.write(output);
         byte[] b = extraFields.toByteArray();
-        output.writeByte((int)(b.length > 0 ? b[0] & 0xff  : 0));
+        output.writeByte((b.length > 0 ? b[0] & 0xff  : 0));
     }
 
     private void init() {
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java
index 58b8b6dbee..ae8ebf3dd7 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/jdbc/ParallelPhoenixNullComparingResultSet.java
@@ -128,7 +128,7 @@ public class ParallelPhoenixNullComparingResultSet extends DelegateResultSet imp
                 boolean secondResult;
                 try {
                     secondResult =
-                            (boolean) ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(
+                            ParallelPhoenixUtil.INSTANCE.getFutureNoRetry(
                                     candidateResultPair.getSecond().getCandidate(), context);
                 } catch (Exception e) {
                     LOG.warn(
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java
index d9a761a88b..ccaf84cb76 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/mapreduce/transform/TransformTool.java
@@ -711,9 +711,8 @@ public class TransformTool extends Configured implements Tool {
         byte[][] newSplitPoints = null;
         // TODO : if the rowkey changes via transform, we need to create new split points
         try (Table hDataTable =
-                     (Table) pConnection.getQueryServices()
-                             .getTable(oldTable.getPhysicalName().getBytes());
-             org.apache.hadoop.hbase.client.Connection connection =
+                     pConnection.getQueryServices().getTable(oldTable.getPhysicalName().getBytes());
+                org.apache.hadoop.hbase.client.Connection connection =
                      HBaseFactoryProvider.getHConnectionFactory().createConnection(configuration)) {
             // Avoid duplicate split keys and remove the empty key
             oldSplitPoints = connection.getRegionLocator(hDataTable.getName()).getStartKeys();
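
The other two hunks remove casts that the compiler already makes redundant:
getFutureNoRetry presumably returns a generically typed result, and getTable
already returns a Table, so the (boolean) and (Table) casts were flagged as
unnecessary. A minimal sketch of the generic-result case, with a hypothetical
helper rather than the Phoenix utility:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public class RedundantCastDemo {

    // The type argument is inferred from the future passed in, so casting
    // the result again is exactly the "unnecessary cast" warning that a
    // strict Eclipse configuration reports as an error.
    static <T> T getFuture(CompletableFuture<T> future)
            throws ExecutionException, InterruptedException {
        return future.get();
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<Boolean> f = CompletableFuture.completedFuture(true);
        boolean secondResult = getFuture(f); // no (boolean) cast required
        System.out.println(secondResult);
    }
}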



Apache-Phoenix | 5.1 | HBase 2.1 | Build #295 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.1  build #295 status SUCCESS
Build #295 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/295/


Apache-Phoenix | 5.1 | HBase 2.2 | Build #295 SUCCESS

2023-12-14 Thread Apache Jenkins Server

5.1 branch  HBase 2.2  build #295 status SUCCESS
Build #295 https://ci-hadoop.apache.org/job/Phoenix/job/Phoenix-mulitbranch/job/5.1/295/