This is an automated email from the ASF dual-hosted git repository.
djwang pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudberry-pxf.git
The following commit(s) were added to refs/heads/main by this push:
new d5750728 Upgrade Apache AVRO library in PXF
d5750728 is described below
commit d5750728a88b352309d8bbbe5050705747b4f95f
Author: Nikolay Antonov <[email protected]>
AuthorDate: Wed Feb 4 07:10:12 2026 +0500
Upgrade Apache AVRO library in PXF
* Bump `avro` library to the latest Java 8-compatible version.
* Bump `avro` and its dependencies in automation tests.
* Add support for the `zstandard` compression codec and a test covering all supported codecs.
* Update documentation.
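As a usage sketch (the table name and HDFS path below are illustrative, not part of this commit), the new codec is selected the same way as the existing ones, via the `COMPRESSION_CODEC` write option of the `hdfs:avro` profile:

``` sql
-- Write Avro data compressed with zstandard; CODEC_LEVEL tunes the
-- speed/ratio trade-off (valid values 1-9, default 6 per the updated docs).
CREATE WRITABLE EXTERNAL TABLE example_avro_zstd (id int, name text)
  LOCATION ('pxf://data/example?PROFILE=hdfs:avro&COMPRESSION_CODEC=zstandard&CODEC_LEVEL=6')
  FORMAT 'CUSTOM' (FORMATTER='pxfwritable_export');

INSERT INTO example_avro_zstd
  SELECT i, 'row_' || i FROM generate_series(1, 100) i;
```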
---
automation/pom.xml | 18 +++-
.../hdfs/writable/avro/codec/expected/query01.ans | 106 +++++++++++++++++++++
.../hdfs/writable/avro/codec/sql/query01.sql | 3 +
.../features/avro/HdfsReadableAvroTest.java | 4 +-
.../features/avro/HdfsWritableAvroTest.java | 25 +++++
docs/content/access_s3.html.md.erb | 32 ++++++-
docs/content/hdfs_avro.html.md.erb | 41 +++++++-
docs/content/objstore_avro.html.md.erb | 34 ++++++-
docs/content/objstore_fileasrow.html.md.erb | 2 +-
docs/content/objstore_fixedwidth.html.md.erb | 2 +-
docs/content/objstore_json.html.md.erb | 2 +-
docs/content/objstore_orc.html.md.erb | 2 +-
docs/content/objstore_parquet.html.md.erb | 2 +-
docs/content/objstore_seqfile.html.md.erb | 2 +-
docs/content/objstore_text.html.md.erb | 6 +-
docs/content/s3_objstore_cfg.html.md.erb | 2 +-
server/build.gradle | 4 +-
server/pxf-hdfs/build.gradle | 2 +
.../pxf/plugins/hdfs/AvroFileAccessor.java | 4 +
19 files changed, 269 insertions(+), 24 deletions(-)
diff --git a/automation/pom.xml b/automation/pom.xml
index 07bb1028..e294cac0 100644
--- a/automation/pom.xml
+++ b/automation/pom.xml
@@ -133,6 +133,11 @@
<artifactId>snappy-java</artifactId>
<version>${snappy.java.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.github.luben</groupId>
+ <artifactId>zstd-jni</artifactId>
+ <version>1.5.7-6</version>
+ </dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
@@ -257,19 +262,19 @@
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
- <version>2.12.6</version>
+ <version>2.14.3</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
- <version>2.13.4.2</version>
+ <version>2.14.3</version>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
- <version>2.12.6</version>
+ <version>2.14.3</version>
</dependency>
<dependency>
@@ -361,10 +366,15 @@
<version>${hdp.hadoop.version}</version>
</dependency>
+ <dependency>
+ <groupId>org.apache.avro</groupId>
+ <artifactId>avro</artifactId>
+ <version>1.11.3</version>
+ </dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro-tools</artifactId>
- <version>1.7.7</version>
+    <version>1.11.3</version> <!-- AVRO-4218: test dependencies for 1.11.5 haven't been published -->
</dependency>
<dependency>
diff --git a/automation/sqlrepo/features/hdfs/writable/avro/codec/expected/query01.ans b/automation/sqlrepo/features/hdfs/writable/avro/codec/expected/query01.ans
new file mode 100644
index 00000000..a70421a6
--- /dev/null
+++ b/automation/sqlrepo/features/hdfs/writable/avro/codec/expected/query01.ans
@@ -0,0 +1,106 @@
+-- @description query01 for PXF HDFS Writable Avro codecs test
+
+SELECT * from writable_avro_codec_readable ORDER BY type_int;
+ type_int | type_smallint |   type_long    | type_float | type_double  | type_string |  type_bytes   | type_boolean |      type_char       |       type_varchar
+----------+---------------+----------------+------------+--------------+-------------+---------------+--------------+----------------------+---------------------------
+        1 |             1 |   100000000000 |     2.0001 |  100000.0001 | row_1       | bytes for 1   | f            | character row 1      | character varying row 1
+        2 |             2 |   200000000000 |     3.0001 |  200000.0002 | row_2       | bytes for 2   | t            | character row 2      | character varying row 2
+        3 |             3 |   300000000000 |     4.0001 |  300000.0003 | row_3       | bytes for 3   | f            | character row 3      | character varying row 3
+        4 |             4 |   400000000000 |     5.0001 |  400000.0004 | row_4       | bytes for 4   | t            | character row 4      | character varying row 4
+        5 |             5 |   500000000000 |     6.0001 |  500000.0005 | row_5       | bytes for 5   | f            | character row 5      | character varying row 5
+        6 |             6 |   600000000000 |     7.0001 |  600000.0006 | row_6       | bytes for 6   | t            | character row 6      | character varying row 6
+        7 |             7 |   700000000000 |     8.0001 |  700000.0007 | row_7       | bytes for 7   | f            | character row 7      | character varying row 7
+        8 |             8 |   800000000000 |     9.0001 |  800000.0008 | row_8       | bytes for 8   | t            | character row 8      | character varying row 8
+        9 |             9 |   900000000000 |    10.0001 |  900000.0009 | row_9       | bytes for 9   | f            | character row 9      | character varying row 9
+       10 |            10 |  1000000000000 |    11.0001 |  1000000.001 | row_10      | bytes for 10  | t            | character row 10     | character varying row 10
+       11 |            11 |  1100000000000 |    12.0001 | 1100000.0011 | row_11      | bytes for 11  | f            | character row 11     | character varying row 11
+       12 |            12 |  1200000000000 |    13.0001 | 1200000.0012 | row_12      | bytes for 12  | t            | character row 12     | character varying row 12
+       13 |            13 |  1300000000000 |    14.0001 | 1300000.0013 | row_13      | bytes for 13  | f            | character row 13     | character varying row 13
+       14 |            14 |  1400000000000 |    15.0001 | 1400000.0014 | row_14      | bytes for 14  | t            | character row 14     | character varying row 14
+       15 |            15 |  1500000000000 |    16.0001 | 1500000.0015 | row_15      | bytes for 15  | f            | character row 15     | character varying row 15
+       16 |            16 |  1600000000000 |    17.0001 | 1600000.0016 | row_16      | bytes for 16  | t            | character row 16     | character varying row 16
+       17 |            17 |  1700000000000 |    18.0001 | 1700000.0017 | row_17      | bytes for 17  | f            | character row 17     | character varying row 17
+       18 |            18 |  1800000000000 |    19.0001 | 1800000.0018 | row_18      | bytes for 18  | t            | character row 18     | character varying row 18
+       19 |            19 |  1900000000000 |    20.0001 | 1900000.0019 | row_19      | bytes for 19  | f            | character row 19     | character varying row 19
+       20 |            20 |  2000000000000 |    21.0001 |  2000000.002 | row_20      | bytes for 20  | t            | character row 20     | character varying row 20
+       21 |            21 |  2100000000000 |    22.0001 | 2100000.0021 | row_21      | bytes for 21  | f            | character row 21     | character varying row 21
+       22 |            22 |  2200000000000 |    23.0001 | 2200000.0022 | row_22      | bytes for 22  | t            | character row 22     | character varying row 22
+       23 |            23 |  2300000000000 |    24.0001 | 2300000.0023 | row_23      | bytes for 23  | f            | character row 23     | character varying row 23
+       24 |            24 |  2400000000000 |    25.0001 | 2400000.0024 | row_24      | bytes for 24  | t            | character row 24     | character varying row 24
+       25 |            25 |  2500000000000 |    26.0001 | 2500000.0025 | row_25      | bytes for 25  | f            | character row 25     | character varying row 25
+       26 |            26 |  2600000000000 |    27.0001 | 2600000.0026 | row_26      | bytes for 26  | t            | character row 26     | character varying row 26
+       27 |            27 |  2700000000000 |    28.0001 | 2700000.0027 | row_27      | bytes for 27  | f            | character row 27     | character varying row 27
+       28 |            28 |  2800000000000 |    29.0001 | 2800000.0028 | row_28      | bytes for 28  | t            | character row 28     | character varying row 28
+       29 |            29 |  2900000000000 |    30.0001 | 2900000.0029 | row_29      | bytes for 29  | f            | character row 29     | character varying row 29
+       30 |            30 |  3000000000000 |    31.0001 |  3000000.003 | row_30      | bytes for 30  | t            | character row 30     | character varying row 30
+       31 |            31 |  3100000000000 |    32.0001 | 3100000.0031 | row_31      | bytes for 31  | f            | character row 31     | character varying row 31
+       32 |            32 |  3200000000000 |    33.0001 | 3200000.0032 | row_32      | bytes for 32  | t            | character row 32     | character varying row 32
+       33 |            33 |  3300000000000 |    34.0001 | 3300000.0033 | row_33      | bytes for 33  | f            | character row 33     | character varying row 33
+       34 |            34 |  3400000000000 |    35.0001 | 3400000.0034 | row_34      | bytes for 34  | t            | character row 34     | character varying row 34
+       35 |            35 |  3500000000000 |    36.0001 | 3500000.0035 | row_35      | bytes for 35  | f            | character row 35     | character varying row 35
+       36 |            36 |  3600000000000 |    37.0001 | 3600000.0036 | row_36      | bytes for 36  | t            | character row 36     | character varying row 36
+       37 |            37 |  3700000000000 |    38.0001 | 3700000.0037 | row_37      | bytes for 37  | f            | character row 37     | character varying row 37
+       38 |            38 |  3800000000000 |    39.0001 | 3800000.0038 | row_38      | bytes for 38  | t            | character row 38     | character varying row 38
+       39 |            39 |  3900000000000 |    40.0001 | 3900000.0039 | row_39      | bytes for 39  | f            | character row 39     | character varying row 39
+       40 |            40 |  4000000000000 |    41.0001 |  4000000.004 | row_40      | bytes for 40  | t            | character row 40     | character varying row 40
+       41 |            41 |  4100000000000 |    42.0001 | 4100000.0041 | row_41      | bytes for 41  | f            | character row 41     | character varying row 41
+       42 |            42 |  4200000000000 |    43.0001 | 4200000.0042 | row_42      | bytes for 42  | t            | character row 42     | character varying row 42
+       43 |            43 |  4300000000000 |    44.0001 | 4300000.0043 | row_43      | bytes for 43  | f            | character row 43     | character varying row 43
+       44 |            44 |  4400000000000 |    45.0001 | 4400000.0044 | row_44      | bytes for 44  | t            | character row 44     | character varying row 44
+       45 |            45 |  4500000000000 |    46.0001 | 4500000.0045 | row_45      | bytes for 45  | f            | character row 45     | character varying row 45
+       46 |            46 |  4600000000000 |    47.0001 | 4600000.0046 | row_46      | bytes for 46  | t            | character row 46     | character varying row 46
+       47 |            47 |  4700000000000 |    48.0001 | 4700000.0047 | row_47      | bytes for 47  | f            | character row 47     | character varying row 47
+       48 |            48 |  4800000000000 |    49.0001 | 4800000.0048 | row_48      | bytes for 48  | t            | character row 48     | character varying row 48
+       49 |            49 |  4900000000000 |    50.0001 | 4900000.0049 | row_49      | bytes for 49  | f            | character row 49     | character varying row 49
+       50 |            50 |  5000000000000 |    51.0001 |  5000000.005 | row_50      | bytes for 50  | t            | character row 50     | character varying row 50
+       51 |            51 |  5100000000000 |    52.0001 | 5100000.0051 | row_51      | bytes for 51  | f            | character row 51     | character varying row 51
+       52 |            52 |  5200000000000 |    53.0001 | 5200000.0052 | row_52      | bytes for 52  | t            | character row 52     | character varying row 52
+       53 |            53 |  5300000000000 |    54.0001 | 5300000.0053 | row_53      | bytes for 53  | f            | character row 53     | character varying row 53
+       54 |            54 |  5400000000000 |    55.0001 | 5400000.0054 | row_54      | bytes for 54  | t            | character row 54     | character varying row 54
+       55 |            55 |  5500000000000 |    56.0001 | 5500000.0055 | row_55      | bytes for 55  | f            | character row 55     | character varying row 55
+       56 |            56 |  5600000000000 |    57.0001 | 5600000.0056 | row_56      | bytes for 56  | t            | character row 56     | character varying row 56
+       57 |            57 |  5700000000000 |    58.0001 | 5700000.0057 | row_57      | bytes for 57  | f            | character row 57     | character varying row 57
+       58 |            58 |  5800000000000 |    59.0001 | 5800000.0058 | row_58      | bytes for 58  | t            | character row 58     | character varying row 58
+       59 |            59 |  5900000000000 |    60.0001 | 5900000.0059 | row_59      | bytes for 59  | f            | character row 59     | character varying row 59
+       60 |            60 |  6000000000000 |    61.0001 |  6000000.006 | row_60      | bytes for 60  | t            | character row 60     | character varying row 60
+       61 |            61 |  6100000000000 |    62.0001 | 6100000.0061 | row_61      | bytes for 61  | f            | character row 61     | character varying row 61
+       62 |            62 |  6200000000000 |    63.0001 | 6200000.0062 | row_62      | bytes for 62  | t            | character row 62     | character varying row 62
+       63 |            63 |  6300000000000 |    64.0001 | 6300000.0063 | row_63      | bytes for 63  | f            | character row 63     | character varying row 63
+       64 |            64 |  6400000000000 |    65.0001 | 6400000.0064 | row_64      | bytes for 64  | t            | character row 64     | character varying row 64
+       65 |            65 |  6500000000000 |    66.0001 | 6500000.0065 | row_65      | bytes for 65  | f            | character row 65     | character varying row 65
+       66 |            66 |  6600000000000 |    67.0001 | 6600000.0066 | row_66      | bytes for 66  | t            | character row 66     | character varying row 66
+       67 |            67 |  6700000000000 |    68.0001 | 6700000.0067 | row_67      | bytes for 67  | f            | character row 67     | character varying row 67
+       68 |            68 |  6800000000000 |    69.0001 | 6800000.0068 | row_68      | bytes for 68  | t            | character row 68     | character varying row 68
+       69 |            69 |  6900000000000 |    70.0001 | 6900000.0069 | row_69      | bytes for 69  | f            | character row 69     | character varying row 69
+       70 |            70 |  7000000000000 |    71.0001 |  7000000.007 | row_70      | bytes for 70  | t            | character row 70     | character varying row 70
+       71 |            71 |  7100000000000 |    72.0001 | 7100000.0071 | row_71      | bytes for 71  | f            | character row 71     | character varying row 71
+       72 |            72 |  7200000000000 |    73.0001 | 7200000.0072 | row_72      | bytes for 72  | t            | character row 72     | character varying row 72
+       73 |            73 |  7300000000000 |    74.0001 | 7300000.0073 | row_73      | bytes for 73  | f            | character row 73     | character varying row 73
+       74 |            74 |  7400000000000 |    75.0001 | 7400000.0074 | row_74      | bytes for 74  | t            | character row 74     | character varying row 74
+       75 |            75 |  7500000000000 |    76.0001 | 7500000.0075 | row_75      | bytes for 75  | f            | character row 75     | character varying row 75
+       76 |            76 |  7600000000000 |    77.0001 | 7600000.0076 | row_76      | bytes for 76  | t            | character row 76     | character varying row 76
+       77 |            77 |  7700000000000 |    78.0001 | 7700000.0077 | row_77      | bytes for 77  | f            | character row 77     | character varying row 77
+       78 |            78 |  7800000000000 |    79.0001 | 7800000.0078 | row_78      | bytes for 78  | t            | character row 78     | character varying row 78
+       79 |            79 |  7900000000000 |    80.0001 | 7900000.0079 | row_79      | bytes for 79  | f            | character row 79     | character varying row 79
+       80 |            80 |  8000000000000 |    81.0001 |  8000000.008 | row_80      | bytes for 80  | t            | character row 80     | character varying row 80
+       81 |            81 |  8100000000000 |    82.0001 | 8100000.0081 | row_81      | bytes for 81  | f            | character row 81     | character varying row 81
+       82 |            82 |  8200000000000 |    83.0001 | 8200000.0082 | row_82      | bytes for 82  | t            | character row 82     | character varying row 82
+       83 |            83 |  8300000000000 |    84.0001 | 8300000.0083 | row_83      | bytes for 83  | f            | character row 83     | character varying row 83
+       84 |            84 |  8400000000000 |    85.0001 | 8400000.0084 | row_84      | bytes for 84  | t            | character row 84     | character varying row 84
+       85 |            85 |  8500000000000 |    86.0001 | 8500000.0085 | row_85      | bytes for 85  | f            | character row 85     | character varying row 85
+       86 |            86 |  8600000000000 |    87.0001 | 8600000.0086 | row_86      | bytes for 86  | t            | character row 86     | character varying row 86
+       87 |            87 |  8700000000000 |    88.0001 | 8700000.0087 | row_87      | bytes for 87  | f            | character row 87     | character varying row 87
+       88 |            88 |  8800000000000 |    89.0001 | 8800000.0088 | row_88      | bytes for 88  | t            | character row 88     | character varying row 88
+       89 |            89 |  8900000000000 |    90.0001 | 8900000.0089 | row_89      | bytes for 89  | f            | character row 89     | character varying row 89
+       90 |            90 |  9000000000000 |    91.0001 |  9000000.009 | row_90      | bytes for 90  | t            | character row 90     | character varying row 90
+       91 |            91 |  9100000000000 |    92.0001 | 9100000.0091 | row_91      | bytes for 91  | f            | character row 91     | character varying row 91
+       92 |            92 |  9200000000000 |    93.0001 | 9200000.0092 | row_92      | bytes for 92  | t            | character row 92     | character varying row 92
+       93 |            93 |  9300000000000 |    94.0001 | 9300000.0093 | row_93      | bytes for 93  | f            | character row 93     | character varying row 93
+       94 |            94 |  9400000000000 |    95.0001 | 9400000.0094 | row_94      | bytes for 94  | t            | character row 94     | character varying row 94
+       95 |            95 |  9500000000000 |    96.0001 | 9500000.0095 | row_95      | bytes for 95  | f            | character row 95     | character varying row 95
+       96 |            96 |  9600000000000 |    97.0001 | 9600000.0096 | row_96      | bytes for 96  | t            | character row 96     | character varying row 96
+       97 |            97 |  9700000000000 |    98.0001 | 9700000.0097 | row_97      | bytes for 97  | f            | character row 97     | character varying row 97
+       98 |            98 |  9800000000000 |    99.0001 | 9800000.0098 | row_98      | bytes for 98  | t            | character row 98     | character varying row 98
+       99 |            99 |  9900000000000 |        100 | 9900000.0099 | row_99      | bytes for 99  | f            | character row 99     | character varying row 99
+      100 |           100 | 10000000000000 |        101 |  10000000.01 | row_100     | bytes for 100 | t            | character row 100    | character varying row 100
+(100 rows)
diff --git a/automation/sqlrepo/features/hdfs/writable/avro/codec/sql/query01.sql b/automation/sqlrepo/features/hdfs/writable/avro/codec/sql/query01.sql
new file mode 100644
index 00000000..1f7c129a
--- /dev/null
+++ b/automation/sqlrepo/features/hdfs/writable/avro/codec/sql/query01.sql
@@ -0,0 +1,3 @@
+-- @description query01 for PXF HDFS Writable Avro codecs test
+
+SELECT * from writable_avro_codec_readable ORDER BY type_int;
diff --git a/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsReadableAvroTest.java b/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsReadableAvroTest.java
index 14dfe1af..bcf7f39a 100755
--- a/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsReadableAvroTest.java
+++ b/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsReadableAvroTest.java
@@ -451,7 +451,7 @@ public class HdfsReadableAvroTest extends BaseFeature {
}
/**
- * Read simple Avro file with Snappy and deflate compressions.
+ * Read simple Avro file with all supported compressions.
*
* @throws Exception
*/
@@ -460,7 +460,7 @@ public class HdfsReadableAvroTest extends BaseFeature {
String schemaName = resourcePath + avroInSequenceArraysSchemaFile;
Table dataTable = new Table("dataTable", null);
- String[] codecs = {"snappy", "deflate"};
+ String[] codecs = {"snappy", "deflate", "bzip2", "zstandard", "xz"};
for (String codec : codecs) {
String fileName = hdfsPath + codec + SUFFIX_AVRO;
diff --git a/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsWritableAvroTest.java b/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsWritableAvroTest.java
index 36dd9ae7..28b75170 100644
--- a/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsWritableAvroTest.java
+++ b/automation/src/test/java/org/apache/cloudberry/pxf/automation/features/avro/HdfsWritableAvroTest.java
@@ -315,6 +315,31 @@ public class HdfsWritableAvroTest extends BaseWritableFeature {
runSqlTest("features/hdfs/writable/avro/null_values");
}
+ /**
+ * Write simple Avro file with all supported compressions.
+ *
+ * @throws Exception
+ */
+ @Test(groups = {"features", "gpdb", "hcfs", "security"})
+ public void avroCodecs() throws Exception {
+ String[] codecs = {"snappy", "deflate", "bzip2", "zstandard", "xz"};
+ for (String codec : codecs) {
+ tableNamePrefix = "writable_avro_codec";
+ fullTestPath = hdfsWritePath + "avro_" + codec + "_codecs";
+            prepareWritableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_WRITABLE_TABLE_COLS, fullTestPath);
+            writableExTable.setUserParameters(new String[]{"COMPRESSION_CODEC=" + codec});
+ gpdb.createTableAndVerify(writableExTable);
+
+            prepareReadableExternalTable(tableNamePrefix, AVRO_PRIMITIVE_READABLE_TABLE_COLS, fullTestPath);
+ gpdb.createTableAndVerify(readableExTable);
+
+            attemptInsert(() -> insertPrimitives(writableExTable), fullTestPath, NUM_RETRIES);
+
+            // check using GPDB readable external table that what went into HCFS is correct
+ runSqlTest("features/hdfs/writable/avro/codec");
+ }
+ }
+
@Override
protected void afterMethod() throws Exception {
if (ProtocolUtils.getPxfTestKeepData().equals("true")) {
diff --git a/docs/content/access_s3.html.md.erb b/docs/content/access_s3.html.md.erb
index ed944192..2db26a0b 100644
--- a/docs/content/access_s3.html.md.erb
+++ b/docs/content/access_s3.html.md.erb
@@ -7,7 +7,7 @@ PXF is installed with a connector to the AWS S3 object store. PXF supports the f
- Overriding the S3 credentials specified in the server configuration by providing them in the `CREATE EXTERNAL TABLE` command DDL.
- Using the Amazon S3 Select service to read certain CSV and Parquet data from S3.
-## <a id="s3_override"></a>Overriding the S3 Server Configuration with DDL
+## <a id="s3_override_ext"></a>Overriding the S3 Server Configuration for External Tables
If you are accessing an S3-compatible object store, you can override the credentials in an S3 server configuration by directly specifying the S3 access ID and secret key via these custom options in the `CREATE EXTERNAL TABLE` `LOCATION` clause:
@@ -27,6 +27,36 @@ PXF does not support overriding Azure, Google Cloud Storage, and MinIO server cr
Refer to [Configuration Property Precedence](cfg_server.html#override) for detailed information about the precedence rules that PXF uses to obtain configuration property settings for a Greenplum Database user.
+## <a id="s3_override_fdw"></a>Overriding the S3 Server Configuration for Foreign Tables
+
+PXF supports accessing S3 data through the Foreign Data Wrapper (FDW) framework, which provides an alternative to the external table framework. To access S3 using FDWs, you must first create a server that defines the S3 connection parameters.
+
+The following command creates a server named `s3srvcfg` that uses the `s3_pxf_fdw` foreign data wrapper and the credentials from the PXF server configuration:
+
+<pre>CREATE SERVER s3srvcfg FOREIGN DATA WRAPPER s3_pxf_fdw;</pre>
+
+You can override the configured credentials by specifying custom options in the `OPTIONS` clause of the `CREATE SERVER` command. The following command creates a server named `s3srvcfg` that uses the `s3_pxf_fdw` foreign data wrapper and specifies the S3 access credentials:
+
+<pre>CREATE SERVER s3srvcfg FOREIGN DATA WRAPPER s3_pxf_fdw
+ OPTIONS (accesskey 'YOURKEY', secretkey 'YOURSECRET');</pre>
+
+Replace `YOURKEY` with your AWS S3 access key ID and `YOURSECRET` with your AWS S3 secret access key.
+
+
+The following options are supported:
+| Option | Description |
+|-------|-------------------------------------|
+| accesskey | The AWS S3 account access key ID. |
+| secretkey | The secret key associated with the AWS S3 access key ID. |
+
+<div class="note warning"><b>Warning:</b> Credentials that you provide in this manner are visible as part of the foreign server definition. Do not use this method of passing credentials in a production environment.</div>
+
+PXF does not support overriding Azure, Google Cloud Storage, and MinIO server credentials in this manner at this time.
+
+Refer to [Configuration Property Precedence](cfg_server.html#override) for detailed information about the precedence rules that PXF uses to obtain configuration property settings for a Greenplum Database user.
+
+
+<div class="note warning"><b>Warning:</b> Credentials that you provide in the server configuration or user mapping are stored in the Greenplum Database system catalogs. Ensure that you follow your organization's security policies for managing database credentials.</div>
## <a id="s3_select"></a>Using the Amazon S3 Select Service
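To make the new foreign-table override concrete, here is a minimal end-to-end sketch; the user mapping target, bucket, and object path are placeholders, and the `resource` and `format` option names follow the foreign-table reference pages touched by this commit:

``` sql
-- Override the configured S3 credentials at the server level, then map the
-- current user and define a foreign table over an Avro object in the bucket.
CREATE SERVER s3srvcfg FOREIGN DATA WRAPPER s3_pxf_fdw
    OPTIONS (accesskey 'YOURKEY', secretkey 'YOURSECRET');
CREATE USER MAPPING FOR CURRENT_USER SERVER s3srvcfg;

CREATE FOREIGN TABLE s3_avro_example (id int, name text)
    SERVER s3srvcfg
    OPTIONS (resource 'my-bucket/data/example', format 'avro');

SELECT * FROM s3_avro_example;
```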
diff --git a/docs/content/hdfs_avro.html.md.erb b/docs/content/hdfs_avro.html.md.erb
index 31207626..14a23346 100644
--- a/docs/content/hdfs_avro.html.md.erb
+++ b/docs/content/hdfs_avro.html.md.erb
@@ -24,7 +24,7 @@ under the License.
Use the PXF HDFS Connector to read and write Avro-format data. This section describes how to use PXF to read and write Avro data in HDFS, including how to create, query, and insert into an external table that references an Avro file in the HDFS data store.
-PXF supports reading or writing Avro files compressed with these codecs: `bzip2`, `xz`, `snappy`, and `deflate`.
+PXF supports reading or writing Avro files compressed with these codecs: `bzip2`, `xz`, `snappy`, `zstandard`, and `deflate`.
## <a id="prereq"></a>Prerequisites
@@ -150,11 +150,10 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
| SERVER=\<server_name\> | The named server configuration that PXF uses to access the data. PXF uses the `default` server if not specified. |
| \<custom‑option\> | \<custom-option\>s are discussed below.|
| FORMAT 'CUSTOM' | Use `FORMAT` '`CUSTOM`' with `(FORMATTER='pxfwritable_export')` (write) or `(FORMATTER='pxfwritable_import')` (read). |
-| DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `<column_name>` on both tables. Doing so will avoid extra motion of data between segments on the load operation. |
+| DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `<column_name>` on both tables. Doing so will avoid extra motion of data between segments on the load operation. Use the `EXPLAIN` command to analyze the query execution plan and verify that the distribution policy minimizes data movement between segments. |
<a id="customopts"></a>
-
For complex types, the PXF `hdfs:avro` profile inserts default delimiters between collection items and values before display. You can use non-default delimiter characters by identifying values for specific `hdfs:avro` custom options in the `CREATE EXTERNAL TABLE` command.
The `hdfs:avro` profile supports the following \<custom-option\>s:
@@ -171,8 +170,40 @@ The PXF `hdfs:avro` profile supports encoding- and compression-related write opt
| Write Option | Value Description |
|-------|-------------------------------------|
-| COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing Avro data include: `bzip2`, `xz`, `snappy`, `deflate`, and `uncompressed` . If this option is not provided, PXF compresses the data using `deflate` compression. |
-| CODEC_LEVEL | The compression level (applicable to the `deflate` and `xz` codecs only). This level controls the trade-off between speed and compression. Valid values are 1 (fastest) to 9 (most compressed). The default compression level is 6. |
+| COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing Avro data include: `bzip2`, `xz`, `snappy`, `deflate`, `zstandard`, and `uncompressed`. If this option is not provided, PXF compresses the data using `deflate` compression. |
+| CODEC_LEVEL | The compression level (applicable to the `deflate`, `zstandard`, and `xz` codecs only). This level controls the trade-off between speed and compression. Valid values are 1 (fastest) to 9 (most compressed). The default compression level is 6. |
+
+## <a id="profile_cfdw"></a>Creating the Foreign Table
+
+Use one of the following foreign data wrappers with `format 'avro'`.
+
+| Object Store | Foreign Data Wrapper |
+|-------|-------------------------------------|
+| Azure Blob Storage | wasbs_pxf_fdw |
+| Azure Data Lake Storage Gen2 | abfss_pxf_fdw |
+| Google Cloud Storage | gs_pxf_fdw |
+| MinIO | s3_pxf_fdw |
+| S3 | s3_pxf_fdw |
+
+The following syntax creates a Greenplum Database foreign table that references an Avro-format file:
+
+``` sql
+CREATE SERVER <foreign_server> FOREIGN DATA WRAPPER <store>_pxf_fdw;
+CREATE USER MAPPING FOR <user_name> SERVER <foreign_server>;
+
+CREATE FOREIGN TABLE [ IF NOT EXISTS ] <table_name>
+ ( <column_name> <data_type> [, ...] | LIKE <other_table> )
+ SERVER <foreign_server>
+  OPTIONS ( resource '<path-to-file>', format 'avro' [, <custom-option> '<value>'[...]]);
+```
+
+| Keyword | Value |
+|-------|-------------------------------------|
+| \<foreign_server\> | The named server configuration that PXF uses to access the data. You can override credentials in the `CREATE SERVER` statement as described in [Overriding the S3 Server Configuration for Foreign Tables](access_s3.html#s3_override_fdw). |
+| resource \<path‑to‑file\> | The path to the directory or file in the object store. When the `<server_name>` configuration includes a [`pxf.fs.basePath`](cfg_server.html#pxf-fs-basepath) property setting, PXF considers \<path‑to‑file\> to be relative to the base path specified. Otherwise, PXF considers it to be an absolute path. \<path‑to‑file\> must not specify a relative path nor include the dollar sign (`$`) character. |
+| format 'avro' | File format specification. |
+| \<custom‑option\>=\<value\> | Avro-specific custom options are described [above](hdfs_avro.html#customopts). |
+
## <a id="avro_example"></a>Example: Reading Avro Data
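For readers following the new foreign-table section, a sketch that passes an Avro write option through the `OPTIONS` clause; that custom options such as `COMPRESSION_CODEC` are accepted here as lowercase foreign-table options is an assumption based on the custom-option reference above, and all object names are hypothetical:

``` sql
-- Foreign table intended to write zstandard-compressed Avro data to S3.
CREATE SERVER avro_s3_srv FOREIGN DATA WRAPPER s3_pxf_fdw;
CREATE USER MAPPING FOR CURRENT_USER SERVER avro_s3_srv;

CREATE FOREIGN TABLE avro_write_example (id int, name text)
    SERVER avro_s3_srv
    OPTIONS (resource 'my-bucket/data/avro_write_example', format 'avro',
             compression_codec 'zstandard');

INSERT INTO avro_write_example VALUES (1, 'first row');
```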
diff --git a/docs/content/objstore_avro.html.md.erb b/docs/content/objstore_avro.html.md.erb
index 4e102e96..708c1df8 100644
--- a/docs/content/objstore_avro.html.md.erb
+++ b/docs/content/objstore_avro.html.md.erb
@@ -73,7 +73,39 @@ The specific keywords and values used in the Greenplum
Database [CREATE EXTERNAL
| \<custom‑option\>=\<value\> | Avro-specific custom options are described in the [PXF HDFS Avro documentation](hdfs_avro.html#customopts). |
| FORMAT 'CUSTOM' | Use `FORMAT` '`CUSTOM`' with `(FORMATTER='pxfwritable_export')` (write) or `(FORMATTER='pxfwritable_import')` (read).|
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
+
+
+## <a id="avro_cfdw"></a>Creating the Foreign Table
+
+Use one of the following foreign data wrappers with `format 'avro'`.
+
+| Object Store | Foreign Data Wrapper |
+|-------|-------------------------------------|
+| Azure Blob Storage | wasbs_pxf_fdw |
+| Azure Data Lake Storage Gen2 | abfss_pxf_fdw |
+| Google Cloud Storage | gs_pxf_fdw |
+| MinIO | s3_pxf_fdw |
+| S3 | s3_pxf_fdw |
+
+The following syntax creates a Greenplum Database foreign table that references an Avro-format file:
+
+``` sql
+CREATE SERVER <foreign_server> FOREIGN DATA WRAPPER <store>_pxf_fdw;
+CREATE USER MAPPING FOR <user_name> SERVER <foreign_server>;
+
+CREATE FOREIGN TABLE [ IF NOT EXISTS ] <table_name>
+ ( <column_name> <data_type> [, ...] | LIKE <other_table> )
+ SERVER <foreign_server>
+  OPTIONS ( resource '<path-to-file>', format 'avro' [, <custom-option> '<value>'[...]]);
+```
+
+| Keyword | Value |
+|-------|-------------------------------------|
+| \<foreign_server\> | The named server configuration that PXF uses to access the data. You can override credentials in the `CREATE SERVER` statement as described in [Overriding the S3 Server Configuration for Foreign Tables](access_s3.html#s3_override_fdw). |
+| resource \<path‑to‑file\> | The path to the directory or file in the object store. When the `<server_name>` configuration includes a [`pxf.fs.basePath`](cfg_server.html#pxf-fs-basepath) property setting, PXF considers \<path‑to‑file\> to be relative to the base path specified. Otherwise, PXF considers it to be an absolute path. \<path‑to‑file\> must not specify a relative path nor include the dollar sign (`$`) character. |
+| format 'avro' | File format specification. |
+| \<custom‑option\>=\<value\> | Avro-specific custom options are described in the [PXF HDFS Avro documentation](hdfs_avro.html#customopts). |
## <a id="example"></a>Example
diff --git a/docs/content/objstore_fileasrow.html.md.erb b/docs/content/objstore_fileasrow.html.md.erb
index 78032802..1687f9bf 100644
--- a/docs/content/objstore_fileasrow.html.md.erb
+++ b/docs/content/objstore_fileasrow.html.md.erb
@@ -44,7 +44,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
| FILE\_AS\_ROW=true | The required option that instructs PXF to read each file into a single table row. |
| FORMAT | The `FORMAT` must specify `'CSV'`. |
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
## <a id="example"></a>Example
diff --git a/docs/content/objstore_fixedwidth.html.md.erb b/docs/content/objstore_fixedwidth.html.md.erb
index 898d937b..dba7e023 100644
--- a/docs/content/objstore_fixedwidth.html.md.erb
+++ b/docs/content/objstore_fixedwidth.html.md.erb
@@ -46,7 +46,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
**Note**: PXF does not support the `(HEADER)` formatter option in the `CREATE EXTERNAL TABLE` command.
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
## <a id="about_fields"></a>About Specifying field_name and width
diff --git a/docs/content/objstore_json.html.md.erb b/docs/content/objstore_json.html.md.erb
index a2c4de18..9dcdb16c 100644
--- a/docs/content/objstore_json.html.md.erb
+++ b/docs/content/objstore_json.html.md.erb
@@ -69,7 +69,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
| \<custom‑option\>=\<value\> | JSON supports the custom options described in the [PXF HDFS JSON documentation](hdfs_json.html#customopts). |
| FORMAT 'CUSTOM' | Use `FORMAT` `'CUSTOM'` with `(FORMATTER='pxfwritable_export')` (write) or `(FORMATTER='pxfwritable_import')` (read). |
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
## <a id="read_example"></a>Read Example
diff --git a/docs/content/objstore_orc.html.md.erb b/docs/content/objstore_orc.html.md.erb
index eedb48b5..d24f2f0b 100644
--- a/docs/content/objstore_orc.html.md.erb
+++ b/docs/content/objstore_orc.html.md.erb
@@ -50,7 +50,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
| FORMAT 'CUSTOM' | Use `FORMAT` '`CUSTOM`' with `(FORMATTER='pxfwritable_export')` (write) or `(FORMATTER='pxfwritable_import')` (read). |
| DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `<column_name>` on both tables. Doing so will avoid extra motion of data between segments on the load operation. |
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
## <a id="example"></a>Example
diff --git a/docs/content/objstore_parquet.html.md.erb b/docs/content/objstore_parquet.html.md.erb
index 7295a179..e0c1f1cb 100644
--- a/docs/content/objstore_parquet.html.md.erb
+++ b/docs/content/objstore_parquet.html.md.erb
@@ -71,7 +71,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
If you are accessing an S3 object store:
-- You can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+- You can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
- If you are reading Parquet data from S3, you can direct PXF to use the S3 Select Amazon service to retrieve the data. Refer to [Using the Amazon S3 Select Service](access_s3.html#s3_select) for more information about the PXF custom option used for this purpose.
## <a id="example"></a> Example
diff --git a/docs/content/objstore_seqfile.html.md.erb b/docs/content/objstore_seqfile.html.md.erb
index 25ffb817..7f2681fe 100644
--- a/docs/content/objstore_seqfile.html.md.erb
+++ b/docs/content/objstore_seqfile.html.md.erb
@@ -64,7 +64,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
| FORMAT 'CUSTOM' | Use `FORMAT` '`CUSTOM`' with `(FORMATTER='pxfwritable_export')` (write) or `(FORMATTER='pxfwritable_import')` (read). |
| DISTRIBUTED BY | If you want to load data from an existing Greenplum Database table into the writable external table, consider specifying the same distribution policy or `<column_name>` on both tables. Doing so will avoid extra motion of data between segments on the load operation. |
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
## <a id="example"></a>Example
diff --git a/docs/content/objstore_text.html.md.erb b/docs/content/objstore_text.html.md.erb
index 3c5608ba..b1afeb56 100644
--- a/docs/content/objstore_text.html.md.erb
+++ b/docs/content/objstore_text.html.md.erb
@@ -66,7 +66,7 @@ The specific keywords and values used in the Greenplum Database [CREATE EXTERNAL
If you are accessing an S3 object store:
-- You can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+- You can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
- If you are reading CSV-format data from S3, you can direct PXF to use the S3 Select Amazon service to retrieve the data. Refer to [Using the Amazon S3 Select Service](access_s3.html#s3_select) for more information about the PXF custom option used for this purpose.
@@ -175,7 +175,7 @@ The specific keywords and values used in the [CREATE EXTERNAL TABLE](https://doc
**Note**: PXF does not support the `(HEADER)` formatter option in the `CREATE EXTERNAL TABLE` command. If your text file includes header line(s), use `SKIP_HEADER_COUNT` to specify the number of lines that PXF should skip at the beginning of the first split of each file.
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
### <a id="profile_textmulti_query"></a>Example: Reading Multi-Line Text Data from S3
@@ -283,7 +283,7 @@ Writable external tables that you create using an `<objstore>:text|csv` profile
|-------|-------------------------------------|
| COMPRESSION_CODEC | The compression codec alias. Supported compression codecs for writing text data include: `default`, `bzip2`, `gzip`, and `uncompressed`. If this option is not provided, Greenplum Database performs no data compression. |
-If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override).
+If you are accessing an S3 object store, you can provide S3 credentials via custom options in the `CREATE EXTERNAL TABLE` command as described in [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext).
### <a id="write_s3textsimple_example"></a>Example: Writing Text Data to S3
diff --git a/docs/content/s3_objstore_cfg.html.md.erb b/docs/content/s3_objstore_cfg.html.md.erb
index d541e336..a9393406 100644
--- a/docs/content/s3_objstore_cfg.html.md.erb
+++ b/docs/content/s3_objstore_cfg.html.md.erb
@@ -36,7 +36,7 @@ The template configuration file for S3 is `<PXF_INSTALL_DIR>/templates/s3-site.x
If required, fine-tune PXF S3 connectivity by specifying properties identified in the [S3A](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html#S3A) section of the Hadoop-AWS module documentation in your `s3-site.xml` server configuration file.
-You can override the credentials for an S3 server configuration by directly specifying the S3 access ID and secret key via custom options in the `CREATE EXTERNAL TABLE` command `LOCATION` clause. Refer to [Overriding the S3 Server Configuration with DDL](access_s3.html#s3_override) for additional information.
+You can override the credentials for an S3 server configuration by directly specifying the S3 access ID and secret key via custom options in the `CREATE EXTERNAL TABLE` command `LOCATION` clause. Refer to [Overriding the S3 Server Configuration for External Tables](access_s3.html#s3_override_ext) for additional information.
### <a id="s3-sse"></a>Configuring S3 Server-Side Encryption
diff --git a/server/build.gradle b/server/build.gradle
index 00d1e22b..cafaec6b 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -188,10 +188,12 @@ configure(javaProjects) {
}
// Avro dependencies
- dependencySet(group:"org.apache.avro", version:"1.10.2") {
+ dependencySet(group:"org.apache.avro", version:"1.11.5") {
entry("avro")
entry("avro-mapred")
}
+ // Zstd support for Avro
+ dependency("com.github.luben:zstd-jni:1.5.7-6")
// Jackson 1.x dependencies
dependencySet(group:"org.codehaus.jackson", version:"1.9.13") {
diff --git a/server/pxf-hdfs/build.gradle b/server/pxf-hdfs/build.gradle
index a5951203..673e528e 100644
--- a/server/pxf-hdfs/build.gradle
+++ b/server/pxf-hdfs/build.gradle
@@ -54,6 +54,8 @@ dependencies {
// Dependencies for writing Avro files with compression
implementation("org.apache.commons:commons-compress") {
transitive = false }
implementation("org.tukaani:xz") {
transitive = false }
+    implementation("com.github.luben:zstd-jni")             { transitive = false }
+
// Hadoop 2.10.1 uses log4j:1.2.17, but since we are using slf4j
// we bring log4j-over-slf4j which provides compatibility for log4j
diff --git a/server/pxf-hdfs/src/main/java/org/apache/cloudberry/pxf/plugins/hdfs/AvroFileAccessor.java b/server/pxf-hdfs/src/main/java/org/apache/cloudberry/pxf/plugins/hdfs/AvroFileAccessor.java
index 714166fd..47a751ea 100644
--- a/server/pxf-hdfs/src/main/java/org/apache/cloudberry/pxf/plugins/hdfs/AvroFileAccessor.java
+++ b/server/pxf-hdfs/src/main/java/org/apache/cloudberry/pxf/plugins/hdfs/AvroFileAccessor.java
@@ -55,6 +55,7 @@ public class AvroFileAccessor extends HdfsSplittableDataAccessor {
private static final String SNAPPY_CODEC = "snappy";
private static final String BZIP2_CODEC = "bzip2";
private static final String XZ_CODEC = "xz";
+ private static final String ZSTD_CODEC = "zstandard";
private AvroWrapper<GenericRecord> avroWrapper;
private DataFileWriter<GenericRecord> writer;
private long rowsWritten, rowsRead;
@@ -159,6 +160,9 @@ public class AvroFileAccessor extends HdfsSplittableDataAccessor {
case XZ_CODEC:
writer.setCodec(CodecFactory.xzCodec(codecCompressionLevel));
break;
+ case ZSTD_CODEC:
+                writer.setCodec(CodecFactory.zstandardCodec(codecCompressionLevel));
+ break;
case NO_CODEC:
writer.setCodec(CodecFactory.nullCodec());
break;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]