This is an automated email from the ASF dual-hosted git repository.

aasha pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new b92b89b  HIVE-25550. Increase the RM_PROGRESS column to accommodate the metrics stat. (#2668)(Ayush Saxena, reviewed by Aasha Medhi)
b92b89b is described below

commit b92b89b412ffaf5d125247958c08f66b83cb1db7
Author: Ayush Saxena <ayushsax...@apache.org>
AuthorDate: Mon Oct 4 14:14:50 2021 +0530

    HIVE-25550. Increase the RM_PROGRESS column to accommodate the metrics stat. (#2668)(Ayush Saxena, reviewed by Aasha Medhi)
---
 .../hadoop/hive/ql/exec/repl/ReplStatsTracker.java | 36 +++++++++++++++-------
 .../repl/metric/ReplicationMetricCollector.java    |  4 ++-
 .../hive/ql/parse/repl/metric/event/Stage.java     |  9 +++++-
 .../metric/TestReplicationMetricCollector.java     | 28 +++++++++++++++++
 .../repl/metric/TestReplicationMetricSink.java     | 29 +++++++++++++++++
 .../apache/hadoop/hive/metastore/ObjectStore.java  |  8 ++++-
 .../src/main/resources/package.jdo                 |  2 +-
 .../src/main/sql/derby/hive-schema-4.0.0.derby.sql |  2 +-
 .../sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql     |  3 ++
 .../src/main/sql/mysql/hive-schema-4.0.0.mysql.sql |  2 +-
 .../sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql     |  3 ++
 .../sql/postgres/hive-schema-4.0.0.postgres.sql    |  2 +-
 .../postgres/upgrade-3.2.0-to-4.0.0.postgres.sql   |  3 ++
 .../upgrade-3.1.3000-to-4.0.0.postgres.sql         |  3 ++
 14 files changed, 116 insertions(+), 18 deletions(-)

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java
index 534a6ec..0d9683b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplStatsTracker.java
@@ -23,6 +23,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.management.ObjectName;
+import java.math.RoundingMode;
+import java.text.DecimalFormat;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Map;
@@ -33,6 +35,9 @@ import java.util.concurrent.ConcurrentHashMap;
  */
 public class ReplStatsTracker {
 
+  // Maintains the length of the RM_Progress column in the RDBMS, which stores the ReplStats
+  public static int RM_PROGRESS_LENGTH = 24000;
+
   // Maintains the descriptive statistics per event type.
   private ConcurrentHashMap<String, DescriptiveStatistics> descMap;
 
@@ -117,25 +122,34 @@ public class ReplStatsTracker {
     return lastEventId;
   }
 
+  private String formatDouble(DecimalFormat dFormat, Double d) {
+    if (!d.isNaN()) {
+      return dFormat.format(d);
+    }
+    return d.toString();
+  }
+
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
+    DecimalFormat dFormat = new DecimalFormat("#.##");
+    dFormat.setRoundingMode(RoundingMode.HALF_UP);
     sb.append("Replication Stats{");
     for (Map.Entry<String, DescriptiveStatistics> event : descMap.entrySet()) {
       DescriptiveStatistics statistics = event.getValue();
       sb.append("[[Event Name: ").append(event.getKey()).append("; ");
       sb.append("Total Number: ").append(statistics.getN()).append("; ");
-      sb.append("Total Time: ").append(statistics.getSum()).append("; ");
-      sb.append("Mean: ").append(statistics.getMean()).append("; ");
-      sb.append("Median: ").append(statistics.getPercentile(50)).append("; ");
-      sb.append("Standard Deviation: 
").append(statistics.getStandardDeviation()).append("; ");
-      sb.append("Variance: ").append(statistics.getVariance()).append("; ");
-      sb.append("Kurtosis: ").append(statistics.getKurtosis()).append("; ");
-      sb.append("Skewness: ").append(statistics.getKurtosis()).append("; ");
-      sb.append("25th Percentile: 
").append(statistics.getPercentile(25)).append("; ");
-      sb.append("50th Percentile: 
").append(statistics.getPercentile(50)).append("; ");
-      sb.append("75th Percentile: 
").append(statistics.getPercentile(75)).append("; ");
-      sb.append("90th Percentile: 
").append(statistics.getPercentile(90)).append("; ");
+      sb.append("Total Time: 
").append(dFormat.format(statistics.getSum())).append("; ");
+      sb.append("Mean: ").append(formatDouble(dFormat, 
statistics.getMean())).append("; ");
+      sb.append("Median: ").append(formatDouble(dFormat, 
statistics.getPercentile(50))).append("; ");
+      sb.append("Standard Deviation: ").append(formatDouble(dFormat, 
statistics.getStandardDeviation())).append("; ");
+      sb.append("Variance: ").append(formatDouble(dFormat, 
statistics.getVariance())).append("; ");
+      sb.append("Kurtosis: ").append(formatDouble(dFormat, 
statistics.getKurtosis())).append("; ");
+      sb.append("Skewness: ").append(formatDouble(dFormat, 
statistics.getSkewness())).append("; ");
+      sb.append("25th Percentile: ").append(formatDouble(dFormat, 
statistics.getPercentile(25))).append("; ");
+      sb.append("50th Percentile: ").append(formatDouble(dFormat, 
statistics.getPercentile(50))).append("; ");
+      sb.append("75th Percentile: ").append(formatDouble(dFormat, 
statistics.getPercentile(75))).append("; ");
+      sb.append("90th Percentile: ").append(formatDouble(dFormat, 
statistics.getPercentile(90))).append("; ");
       sb.append("Top ").append(k).append(" EventIds(EventId=Time) 
").append(topKEvents.get(event.getKey()))
           .append(";" + "]]");
     }
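
For readers skimming the hunk above: toString() now routes every statistic through one DecimalFormat ("#.##" with HALF_UP rounding), and formatDouble() bypasses the formatter for NaN so the output still reads "NaN" (the new unit test below relies on that). The following is a minimal standalone sketch of that behaviour; the demo class name is ours and not part of the commit:

    import java.math.RoundingMode;
    import java.text.DecimalFormat;

    public class ReplStatsFormatDemo {

      // Mirrors ReplStatsTracker#formatDouble from the hunk above:
      // two decimal places unless the value is NaN, in which case
      // Double.toString() is returned so the text stays "NaN".
      private static String formatDouble(DecimalFormat dFormat, Double d) {
        if (!d.isNaN()) {
          return dFormat.format(d);
        }
        return d.toString();
      }

      public static void main(String[] args) {
        DecimalFormat dFormat = new DecimalFormat("#.##");
        dFormat.setRoundingMode(RoundingMode.HALF_UP);

        System.out.println(formatDouble(dFormat, 1234.567));   // 1234.57
        System.out.println(formatDouble(dFormat, 2.0));        // 2 ("#.##" drops trailing zeros)
        System.out.println(formatDouble(dFormat, Double.NaN)); // NaN
      }
    }

Trimming each statistic to two decimals presumably helps keep the serialized stats string within the new RM_PROGRESS budget.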
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
index b3357f9..88f8e74 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/ReplicationMetricCollector.java
@@ -119,7 +119,9 @@ public abstract class ReplicationMetricCollector {
       stage.setEndTime(System.currentTimeMillis());
       stage.setReplSnapshotsCount(replSnapshotCount);
       if (replStatsTracker != null && !(replStatsTracker instanceof NoOpReplStatsTracker)) {
-        stage.setReplStats(replStatsTracker.toString());
+        String replStatString = replStatsTracker.toString();
+        LOG.info("Replication Statistics are: {}", replStatString);
+        stage.setReplStats(replStatString);
       }
       progress.addStage(stage);
       replicationMetric.setProgress(progress);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java
index 83df9f0..4a54a8b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/repl/metric/event/Stage.java
@@ -24,6 +24,8 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker.RM_PROGRESS_LENGTH;
+
 /**
  * Class for defining the different stages of replication.
  */
@@ -127,7 +129,12 @@ public class Stage {
   }
 
   public void setReplStats(String replStats) {
-    this.replStats = replStats;
+    // Check the stat string doesn't surpass the RM_PROGRESS column length.
+    if (replStats.length() >= RM_PROGRESS_LENGTH - 2000) {
+      this.replStats = "RM_PROGRESS LIMIT EXCEEDED TO " + replStats.length();
+    } else {
+      this.replStats = replStats;
+    }
   }
 
 }
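
The setter above keeps 2000 characters of headroom below RM_PROGRESS_LENGTH, so any stats string of 22000 characters or more is replaced by a short marker recording its length. A rough standalone check of that boundary (demo class and test strings are ours, not from the commit; String.repeat needs Java 11+):

    public class ReplStatsGuardDemo {

      static final int RM_PROGRESS_LENGTH = 24000; // mirrors ReplStatsTracker

      // Mirrors the guard added in Stage#setReplStats above.
      static String guard(String replStats) {
        if (replStats.length() >= RM_PROGRESS_LENGTH - 2000) {
          return "RM_PROGRESS LIMIT EXCEEDED TO " + replStats.length();
        }
        return replStats;
      }

      public static void main(String[] args) {
        String justUnder = "x".repeat(21999);
        String atLimit = "x".repeat(22000);
        System.out.println(guard(justUnder).length()); // 21999 -> stored as-is
        System.out.println(guard(atLimit));            // RM_PROGRESS LIMIT EXCEEDED TO 22000
      }
    }

The remaining headroom presumably leaves room for the rest of the serialized progress payload before it reaches the 24000-character column; the new test in TestReplicationMetricCollector below asserts the tracker output itself stays under that figure for typical k values.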
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java
index 1e74d08..a9784a0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.parse.repl.DumpType;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.load.FailoverMetaData;
@@ -51,6 +52,7 @@ import java.util.Arrays;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Unit Test class for In Memory Replication Metric Collection.
@@ -457,4 +459,30 @@ public class TestReplicationMetricCollector {
 
     assertEquals(4, repl.getDescMap().get("EVENT_ADD_DATABASE").getN());
   }
+
+  @Test
+  public void testReplStatsTrackerLimit() {
+    ReplStatsTracker repl = new ReplStatsTracker(10);
+    // Check for k=10
+    generateStatsString(10, repl);
+    assertTrue("ReplStat string is " + repl.toString().length(), 
repl.toString().length() < 24000);
+    // Check for k=5
+    repl = new ReplStatsTracker(5);
+    generateStatsString(5, repl);
+    assertTrue("ReplStat string is " + repl.toString().length(), 
repl.toString().length() < 24000);
+    // Check for k=2 & check NaN values don't get messed up due to formatter
+    repl = new ReplStatsTracker(2);
+    generateStatsString(2, repl);
+    assertTrue(repl.toString().contains("NaN"));
+  }
+
+  private void generateStatsString(int k, ReplStatsTracker repl) {
+    DumpType[] types = DumpType.values();
+    for (DumpType type : types) {
+      for (int i = 0; i < k; i++) {
+        int eventId = 1000000 + i * type.ordinal();
+        repl.addEntry(type.toString(), Integer.toString(eventId), 10000 + i + ( i * 1234));
+      }
+    }
+  }
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java
index 6ccae0e..dc7459d 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricSink.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.parse.repl.metric;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hive.conf.Constants;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.GetReplicationMetricsRequest;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker;
 import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils;
 import org.apache.hadoop.hive.ql.exec.repl.util.SnapshotUtils;
 import org.apache.hadoop.hive.ql.metadata.Hive;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.BootstrapDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector;
 import org.apache.hadoop.hive.ql.parse.repl.load.FailoverMetaData;
@@ -40,6 +42,7 @@ import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.ProgressMapper;
 import org.apache.hadoop.hive.ql.parse.repl.metric.event.StageMapper;
 
+import org.apache.thrift.TException;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -53,6 +56,9 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import static org.apache.hadoop.hive.ql.exec.repl.ReplStatsTracker.RM_PROGRESS_LENGTH;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Unit Test class for In Memory Replication Metric Collection.
  */
@@ -300,4 +306,27 @@ public class TestReplicationMetricSink {
     }
   }
 
+  @Test
+  public void testReplStatsInMetrics() throws HiveException, InterruptedException, TException {
+    ReplicationMetricCollector incrementDumpMetricCollector =
+        new IncrementalDumpMetricCollector("testAcidTablesReplLoadBootstrapIncr_1592205875387",
+            "hdfs://localhost:65158/tmp/org_apache_hadoop_hive_ql_parse_TestReplicationScenarios_245261428230295"
+                + "/hrepl0/dGVzdGFjaWR0YWJsZXNyZXBsbG9hZGJvb3RzdHJhcGluY3JfMTU5MjIwNTg3NTM4Nw==/0/hive", conf);
+    Map<String, Long> metricMap = new HashMap<>();
+    ReplStatsTracker repl = Mockito.mock(ReplStatsTracker.class);
+
+    Mockito.when(repl.toString()).thenReturn(RandomStringUtils.randomAlphabetic(RM_PROGRESS_LENGTH));
+    metricMap.put(ReplUtils.MetricName.EVENTS.name(), (long) 10);
+    incrementDumpMetricCollector.reportStageStart("dump", metricMap);
+    incrementDumpMetricCollector.reportStageProgress("dump", ReplUtils.MetricName.EVENTS.name(), 10);
+    incrementDumpMetricCollector
+        .reportStageEnd("dump", Status.SUCCESS, 10, new SnapshotUtils.ReplSnapshotCount(), repl);
+    Thread.sleep(1000 * 20);
+    GetReplicationMetricsRequest metricsRequest = new GetReplicationMetricsRequest();
+    metricsRequest.setPolicy("repl");
+    ReplicationMetricList actualReplicationMetrics = Hive.get(conf).getMSC().getReplicationMetrics(metricsRequest);
+    assertTrue(actualReplicationMetrics.getReplicationMetricList().get(0).getProgress(),
+        actualReplicationMetrics.getReplicationMetricList().get(0).getProgress()
+            .contains("RM_PROGRESS LIMIT EXCEEDED"));
+  }
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 84f5952..1ceae64 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -14413,7 +14413,13 @@ public class ObjectStore implements RawStore, Configurable {
           mReplicationMetrics.setMetadata(replicationMetric.getMetadata());
         }
         if (!StringUtils.isEmpty(replicationMetric.getProgress())) {
-          mReplicationMetrics.setProgress(replicationMetric.getProgress());
+          // Check for the limit of RM_PROGRESS Column.
+          if ((dbType.isORACLE() && replicationMetric.getProgress().length() > 4000)
+              || replicationMetric.getProgress().length() > 24000) {
+            mReplicationMetrics.setProgress("RM_PROGRESS LIMIT EXCEEDED");
+          } else {
+            mReplicationMetrics.setProgress(replicationMetric.getProgress());
+          }
         }
         mReplicationMetricsList.add(mReplicationMetrics);
       }
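
One detail worth calling out in the hunk above: the cap is database-specific. The diffstat touches Derby, MySQL, and Postgres schema scripts but no Oracle script, so on Oracle the progress string is still held to 4000 characters (the usual VARCHAR2 ceiling) while the widened 24000-character column applies elsewhere. A loose sketch of that branch, with DbKind and the demo class standing in for Hive's real database-type handling (they are not Hive APIs):

    enum DbKind { ORACLE, OTHER }

    public class RmProgressCapDemo {

      // Mirrors the branch added in ObjectStore above.
      static String capProgress(DbKind dbType, String progress) {
        if ((dbType == DbKind.ORACLE && progress.length() > 4000)
            || progress.length() > 24000) {
          return "RM_PROGRESS LIMIT EXCEEDED";
        }
        return progress;
      }

      public static void main(String[] args) {
        String fiveThousand = "y".repeat(5000);
        System.out.println(capProgress(DbKind.ORACLE, fiveThousand));          // RM_PROGRESS LIMIT EXCEEDED
        System.out.println(capProgress(DbKind.OTHER, fiveThousand).length());  // 5000, stored as-is
      }
    }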
diff --git a/standalone-metastore/metastore-server/src/main/resources/package.jdo b/standalone-metastore/metastore-server/src/main/resources/package.jdo
index 0368a10..e31f076 100644
--- a/standalone-metastore/metastore-server/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo
@@ -1556,7 +1556,7 @@
         <column name="RM_METADATA" jdbc-type="varchar" length="4000" 
allows-null="true"/>
       </field>
       <field name="progress">
-        <column name="RM_PROGRESS" jdbc-type="varchar" length="4000" 
allows-null="true"/>
+        <column name="RM_PROGRESS" jdbc-type="varchar" length="24000" 
allows-null="true"/>
       </field>
       <field name="startTime">
          <column name="RM_START_TIME" jdbc-type="integer" allows-null="false"/>
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 453f647..525a909 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@ -803,7 +803,7 @@ CREATE TABLE "APP"."REPLICATION_METRICS" (
   "RM_POLICY" varchar(256) NOT NULL,
   "RM_DUMP_EXECUTION_ID" bigint NOT NULL,
   "RM_METADATA" varchar(4000),
-  "RM_PROGRESS" varchar(4000),
+  "RM_PROGRESS" varchar(24000),
   "RM_START_TIME" integer not null,
   PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID")
 );
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index 4fb2a2e..c9a5393 100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@ -97,6 +97,9 @@ CREATE TABLE "APP"."REPLICATION_METRICS" (
   PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID")
 );
 
+--Increase the size of RM_PROGRESS to accommodate the replication statistics
+ALTER TABLE "APP"."REPLICATION_METRICS" ALTER "RM_PROGRESS" SET DATA TYPE VARCHAR(24000);
+
 CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY");
 CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" 
("RM_DUMP_EXECUTION_ID");
 
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index fd965c6..4163199 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@ -1269,7 +1269,7 @@ CREATE TABLE IF NOT EXISTS REPLICATION_METRICS (
   RM_POLICY varchar(256) NOT NULL,
   RM_DUMP_EXECUTION_ID bigint NOT NULL,
   RM_METADATA varchar(4000),
-  RM_PROGRESS varchar(4000),
+  RM_PROGRESS varchar(24000),
   RM_START_TIME integer NOT NULL,
   PRIMARY KEY(RM_SCHEDULED_EXECUTION_ID)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index c4a6359..49451c4 100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@ -104,6 +104,9 @@ CREATE TABLE IF NOT EXISTS REPLICATION_METRICS (
   PRIMARY KEY(RM_SCHEDULED_EXECUTION_ID)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
+--Increase the size of RM_PROGRESS to accommodate the replication statistics
+ALTER TABLE REPLICATION_METRICS MODIFY RM_PROGRESS varchar(24000);
+
 -- Create indexes for the replication metrics table
 CREATE INDEX POLICY_IDX ON REPLICATION_METRICS (RM_POLICY);
 CREATE INDEX DUMP_IDX ON REPLICATION_METRICS (RM_DUMP_EXECUTION_ID);
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 482bac9..dc311bb 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@ -1976,7 +1976,7 @@ CREATE TABLE "REPLICATION_METRICS" (
   "RM_POLICY" varchar(256) NOT NULL,
   "RM_DUMP_EXECUTION_ID" bigint NOT NULL,
   "RM_METADATA" varchar(4000),
-  "RM_PROGRESS" varchar(4000),
+  "RM_PROGRESS" varchar(24000),
   "RM_START_TIME" integer NOT NULL,
   PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID")
 );
diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index b82253f..fbbba67 100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@ -228,6 +228,9 @@ CREATE TABLE "REPLICATION_METRICS" (
   PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID")
 );
 
+--Increase the size of RM_PROGRESS to accommodate the replication statistics
+ALTER TABLE "REPLICATION_METRICS" ALTER "RM_PROGRESS" TYPE varchar(24000);
+
 --Create indexes for the replication metrics table
 CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
 CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID");
diff --git a/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql
index 7851b08..c9ec652 100644
--- a/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0.postgres.sql
@@ -48,6 +48,9 @@ CREATE TABLE "REPLICATION_METRICS" (
     PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID")
     );
 
+--Increase the size of RM_PROGRESS to accommodate the replication statistics
+ALTER TABLE "REPLICATION_METRICS" ALTER "RM_PROGRESS" TYPE varchar(24000);
+
 --Create indexes for the replication metrics table
 CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY");
 CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID");
