This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 2a3f320f937 Fix the error on generating bki
2a3f320f937 is described below
commit 2a3f320f937bd660e33b9c053eb889d0e1d67e42
Author: Jinbao Chen <[email protected]>
AuthorDate: Tue Sep 16 17:39:11 2025 +0800
Fix the error on generating bki
---
src/backend/catalog/Catalog.pm | 120 ++++++++++++----------------
src/include/catalog/pg_attribute_encoding.h | 4 +-
src/include/catalog/pg_opfamily.dat | 2 +-
src/include/catalog/pg_proc.dat | 30 +++----
src/include/catalog/pg_publication_rel.h | 4 +-
src/include/catalog/pg_type_encoding.h | 4 +-
6 files changed, 73 insertions(+), 91 deletions(-)
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index 39349a2c839..3b7153affc8 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -285,90 +285,72 @@ sub ParseData
	$input_file =~ /(\w+)\.dat$/
	  or die "Input file $input_file needs to be a .dat file.\n";
	my $catname = $1;
-	my $data = [];
+	my $data = [];
-	if ($preserve_formatting)
+	# Scan the input file.
+	while (<$ifd>)
	{
-		# Scan the input file.
-		while (<$ifd>)
-		{
-			my $hash_ref;
+		my $hash_ref;
-			if (/{/)
+		if (/{/)
+		{
+			# Capture the hash ref
+			# NB: Assumes that the next hash ref can't start on the
+			# same line where the present one ended.
+			# Not foolproof, but we shouldn't need a full parser,
+			# since we expect relatively well-behaved input.
+
+			# Quick hack to detect when we have a full hash ref to
+			# parse. We can't just use a regex because of values in
+			# pg_aggregate and pg_proc like '{0,0}'. This will need
+			# work if we ever need to allow unbalanced braces within
+			# a field value.
+			my $lcnt = tr/{//;
+			my $rcnt = tr/}//;
+
+			if ($lcnt == $rcnt)
			{
-				# Capture the hash ref
-				# NB: Assumes that the next hash ref can't start on the
-				# same line where the present one ended.
-				# Not foolproof, but we shouldn't need a full parser,
-				# since we expect relatively well-behaved input.
-
-				# Quick hack to detect when we have a full hash ref to
-				# parse. We can't just use a regex because of values in
-				# pg_aggregate and pg_proc like '{0,0}'. This will need
-				# work if we ever need to allow unbalanced braces within
-				# a field value.
-				my $lcnt = tr/{//;
-				my $rcnt = tr/}//;
-
-				if ($lcnt == $rcnt)
+				# We're treating the input line as a piece of Perl, so we
+				# need to use string eval here. Tell perlcritic we know what
+				# we're doing.
+				eval '$hash_ref = ' . $_; ## no critic (ProhibitStringyEval)
+				if (!ref $hash_ref)
				{
-					# We're treating the input line as a piece of Perl, so we
-					# need to use string eval here. Tell perlcritic we know what
-					# we're doing.
-					eval "\$hash_ref = $_"; ## no critic (ProhibitStringyEval)
-					if (!ref $hash_ref)
-					{
-						die "$input_file: error parsing line $.:\n$_\n";
-					}
-
-					# Annotate each hash with the source line number.
-					$hash_ref->{line_number} = $.;
-
-					# Expand tuples to their full representation.
-					AddDefaultValues($hash_ref, $schema, $catname);
-				}
-				else
-				{
-					my $next_line = <$ifd>;
-					die "$input_file: file ends within Perl hash\n"
-					  if !defined $next_line;
-					$_ .= $next_line;
-					redo;
+					die "$input_file: error parsing line $.:\n$_\n";
				}
-			}
-			# If we found a hash reference, keep it, unless it is marked as
-			# autogenerated; in that case it'd duplicate an entry we'll
-			# autogenerate below. (This makes it safe for reformat_dat_file.pl
-			# with --full-tuples to print autogenerated entries, which seems like
-			# useful behavior for debugging.)
-			#
-			# Otherwise, we have a non-data string, which we need to keep in
-			# order to preserve formatting.
-			if (defined $hash_ref)
-			{
-				push @$data, $hash_ref if !$hash_ref->{autogenerated};
+				# Annotate each hash with the source line number.
+				$hash_ref->{line_number} = $.;
+
+				# Expand tuples to their full representation.
+				AddDefaultValues($hash_ref, $schema, $catname);
			}
			else
			{
-				push @$data, $_;
+				my $next_line = <$ifd>;
+				die "$input_file: file ends within Perl hash\n"
+				  if !defined $next_line;
+				$_ .= $next_line;
+				redo;
			}
		}
-	}
-	else
-	{
-		# When we only care about the contents, it's faster to read and eval
-		# the whole file at once.
-		local $/;
-		my $full_file = <$ifd>;
-		eval "\$data = $full_file" ## no critic (ProhibitStringyEval)
-		  or die "error parsing $input_file\n";
-		foreach my $hash_ref (@{$data})
+
+		# If we found a hash reference, keep it, unless it is marked as
+		# autogenerated; in that case it'd duplicate an entry we'll
+		# autogenerate below. (This makes it safe for reformat_dat_file.pl
+		# with --full-tuples to print autogenerated entries, which seems like
+		# useful behavior for debugging.)
+		#
+		# Only keep non-data strings if we are told to preserve formatting.
+		if (defined $hash_ref)
		{
-			AddDefaultValues($hash_ref, $schema, $catname);
+			push @$data, $hash_ref if !$hash_ref->{autogenerated};
+		}
+		elsif ($preserve_formatting)
+		{
+			push @$data, $_;
		}
	}
-
	close $ifd;
	# If this is pg_type, auto-generate array types too.
diff --git a/src/include/catalog/pg_attribute_encoding.h b/src/include/catalog/pg_attribute_encoding.h
index 94bb67b102d..7c346cdb7aa 100644
--- a/src/include/catalog/pg_attribute_encoding.h
+++ b/src/include/catalog/pg_attribute_encoding.h
@@ -38,7 +38,7 @@ typedef int16 FileNumber;
* typedef struct FormData_pg_attribute_encoding
* ----------------
*/
-CATALOG(pg_attribute_encoding,6231,AttributeEncodingRelationId)
+CATALOG(pg_attribute_encoding,6422,AttributeEncodingRelationId)
{
Oid attrelid;
int16 attnum;
@@ -57,7 +57,7 @@ FOREIGN_KEY(attrelid REFERENCES pg_attribute(attrelid));
* ----------------
*/
typedef FormData_pg_attribute_encoding *Form_pg_attribute_encoding;
-DECLARE_TOAST(pg_attribute_encoding, 6233, 6234);
+DECLARE_TOAST(pg_attribute_encoding, 6412, 6427);
DECLARE_UNIQUE_INDEX(pg_attribute_encoding_attrelid_filenum_index, 6238, on pg_attribute_encoding using btree(attrelid oid_ops, filenum int2_ops));
#define AttributeEncodingAttrelidFilenumIndexId 6238
diff --git a/src/include/catalog/pg_opfamily.dat b/src/include/catalog/pg_opfamily.dat
index 85b5fa2cf96..23e88e1de38 100644
--- a/src/include/catalog/pg_opfamily.dat
+++ b/src/include/catalog/pg_opfamily.dat
@@ -306,7 +306,7 @@
opfmethod => 'gist', opfname => 'multirange_ops' },
-{ oid => '6221',
+{ oid => '6408',
opfmethod => 'btree', opfname => 'complex_ops' },
{ oid => '6224',
opfmethod => 'hash', opfname => 'complex_ops' },
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 1df8fd2b475..12e22d589f9 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -7530,7 +7530,7 @@
descr => 'total disk space usage for the specified table and associated indexes',
proname => 'pg_total_relation_size', provolatile => 'v', proparallel => 'u',
prorettype => 'int8',
proargtypes => 'regclass', prosrc => 'pg_total_relation_size' },
-{ oid => '2137',
+{ oid => '6415',
descr => 'ao tables segment file count',
proname => 'gp_ao_segment_file_count', provolatile => 'v', prorettype => 'int2',
proargtypes => 'regclass', prosrc => 'gp_ao_segment_file_count' },
@@ -7755,7 +7755,7 @@
{ oid => '3422', descr => 'SHA-512 hash',
proname => 'sha512', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'bytea', prosrc => 'sha512_bytea' },
-{ oid => '4544', descr => 'SM3 hash',
+{ oid => '6418', descr => 'SM3 hash',
proname => 'sm3', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'bytea', prosrc => 'sm3_bytea' },
@@ -11014,7 +11014,7 @@
proname => 'range_agg', prokind => 'a', proisstrict => 'f',
prorettype => 'anymultirange', proargtypes => 'anyrange',
prosrc => 'aggregate_dummy' },
-{ oid => '6225', descr => 'aggregate transition function',
+{ oid => '6401', descr => 'aggregate transition function',
proname => 'multirange_agg_transfn', proisstrict => 'f',
prorettype => 'internal', proargtypes => 'internal anymultirange',
prosrc => 'multirange_agg_transfn' },
@@ -12310,34 +12310,34 @@
prosrc => 'aggregate_dummy' },
# Cloudberry Analytic functions
-{ oid => 6212, descr => 'perform matrix addition on two conformable matrices',
+{ oid => 6405, descr => 'perform matrix addition on two conformable matrices',
proname => 'int2_matrix_accum', proisstrict => 'f', prorettype => '_int8',
proargtypes => '_int8 _int2', prosrc => 'matrix_add' },
-{ oid => 6213, descr => 'perform matrix addition on two conformable matrices',
+{ oid => 6407, descr => 'perform matrix addition on two conformable matrices',
proname => 'int4_matrix_accum', proisstrict => 'f', prorettype => '_int8',
proargtypes => '_int8 _int4', prosrc => 'matrix_add' },
-{ oid => 6214, descr => 'perform matrix addition on two conformable matrices',
+{ oid => 6406, descr => 'perform matrix addition on two conformable matrices',
proname => 'int8_matrix_accum', prorettype => '_int8', proargtypes => '_int8 _int8', prosrc => 'matrix_add' },
-{ oid => 6215, descr => 'perform matrix addition on two conformable matrices',
+{ oid => 6414, descr => 'perform matrix addition on two conformable matrices',
proname => 'float8_matrix_accum', prorettype => '_float8', proargtypes => '_float8 _float8', prosrc => 'matrix_add' },
-{ oid => 6216, descr => 'sum of matrixes',
+{ oid => 6426, descr => 'sum of matrixes',
proname => 'sum', prokind => 'a', proisstrict => 'f',
prorettype => '_int8', proargtypes => '_int2',
prosrc => 'aggregate_dummy' },
-{ oid => 6217, descr => 'sum of matrixes',
+{ oid => 6404, descr => 'sum of matrixes',
proname => 'sum', prokind => 'a', proisstrict => 'f',
prorettype => '_int8', proargtypes => '_int4',
prosrc => 'aggregate_dummy' },
-{ oid => 6218, descr => 'sum of matrixes',
+{ oid => 6410, descr => 'sum of matrixes',
proname => 'sum', prokind => 'a', proisstrict => 'f',
prorettype => '_int8', proargtypes => '_int8',
prosrc => 'aggregate_dummy' },
-{ oid => 6219, descr => 'sum of matrixes',
+{ oid => 6419, descr => 'sum of matrixes',
proname => 'sum', prokind => 'a', proisstrict => 'f',
prorettype => '_float8', proargtypes => '_float8',
prosrc => 'aggregate_dummy' },
@@ -12346,12 +12346,12 @@
{ oid => 6225, descr => 'aggregate transition function',
proname => 'int4_pivot_accum', proisstrict => 'f', prorettype => '_int8',
proargtypes => '_int8 _text text int4', prosrc => 'int4_pivot_accum' },
-{ oid => 6226, descr => 'pivot sum aggregate',
+{ oid => 6420, descr => 'pivot sum aggregate',
proname => 'pivot_sum', prokind => 'a', proisstrict => 'f',
prorettype => '_int8', proargtypes => '_text text int4',
prosrc => 'aggregate_dummy' },
-{ oid => 6227, descr => 'aggregate transition function',
+{ oid => 6413, descr => 'aggregate transition function',
proname => 'int8_pivot_accum', proisstrict => 'f', prorettype => '_int8',
proargtypes => '_int8 _text text int8', prosrc => 'int8_pivot_accum' },
{ oid => 6228, descr => 'pivot sum aggregate',
@@ -12362,7 +12362,7 @@
{ oid => 6229, descr => 'aggregate transition function',
proname => 'float8_pivot_accum', proisstrict => 'f', prorettype => '_float8', proargtypes => '_float8 _text text float8', prosrc => 'float8_pivot_accum' },
-{ oid => 6230, descr => 'pivot sum aggregate',
+{ oid => 6409, descr => 'pivot sum aggregate',
proname => 'pivot_sum', prokind => 'a', proisstrict => 'f',
prorettype => '_float8', proargtypes => '_text text float8',
prosrc => 'aggregate_dummy' },
@@ -12838,7 +12838,7 @@
prorettype => 'gp_hyperloglog_estimator', proargtypes => 'anyelement',
prosrc => 'aggregate_dummy' },
-{ oid => 6232, descr => 'deparse DISTRIBUTED BY clause for a given relation',
+{ oid => 6423, descr => 'deparse DISTRIBUTED BY clause for a given relation',
proname => 'pg_get_table_distributedby', provolatile => 's', prorettype => 'text', proargtypes => 'oid', prosrc => 'pg_get_table_distributedby' },
diff --git a/src/include/catalog/pg_publication_rel.h b/src/include/catalog/pg_publication_rel.h
index 613e9747c2a..af56c5c0577 100644
--- a/src/include/catalog/pg_publication_rel.h
+++ b/src/include/catalog/pg_publication_rel.h
@@ -45,10 +45,10 @@ CATALOG(pg_publication_rel,6106,PublicationRelRelationId)
*/
typedef FormData_pg_publication_rel *Form_pg_publication_rel;
-DECLARE_TOAST(pg_publication_rel, 6228, 6229);
+DECLARE_TOAST(pg_publication_rel, 6411, 6425);
DECLARE_UNIQUE_INDEX_PKEY(pg_publication_rel_oid_index, 6112, PublicationRelObjectIndexId, on pg_publication_rel using btree(oid oid_ops));
DECLARE_UNIQUE_INDEX(pg_publication_rel_prrelid_prpubid_index, 6113, PublicationRelPrrelidPrpubidIndexId, on pg_publication_rel using btree(prrelid oid_ops, prpubid oid_ops));
-DECLARE_INDEX(pg_publication_rel_prpubid_index, 6116, PublicationRelPrpubidIndexId, on pg_publication_rel using btree(prpubid oid_ops));
+DECLARE_INDEX(pg_publication_rel_prpubid_index, 6424, PublicationRelPrpubidIndexId, on pg_publication_rel using btree(prpubid oid_ops));
#endif /* PG_PUBLICATION_REL_H */
diff --git a/src/include/catalog/pg_type_encoding.h b/src/include/catalog/pg_type_encoding.h
index d840b802bbf..884b2eb08b4 100644
--- a/src/include/catalog/pg_type_encoding.h
+++ b/src/include/catalog/pg_type_encoding.h
@@ -23,7 +23,7 @@
* typedef struct FormData_pg_type_encoding
* ----------------
*/
-CATALOG(pg_type_encoding,6220,TypeEncodingRelationId)
+CATALOG(pg_type_encoding,6421,TypeEncodingRelationId)
{
Oid typid;
#ifdef CATALOG_VARLEN /* variable-length fields start here */
@@ -40,6 +40,6 @@ FOREIGN_KEY(typid REFERENCES pg_type(oid));
* ----------------
*/
typedef FormData_pg_type_encoding *Form_pg_type_encoding;
-DECLARE_TOAST(pg_type_encoding, 6222, 6223);
+DECLARE_TOAST(pg_type_encoding, 6416, 6417);
#endif /* PG_TYPE_ENCODING_H */
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]