This is an automated email from the ASF dual-hosted git repository.

nkak pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/madlib.git

commit 4f62f4b1772ac8f46b3bbdfba5986ce7280b2da8
Author: Ekta Khanna <ekha...@pivotal.io>
AuthorDate: Fri Feb 14 02:02:56 2020 +0000

    DL: Fix disk issue by using truncate guc
    
    JIRA: MADLIB-1406
    
    While testing places10 with fit multiple (gpdb5 and gpdb6, 10 iterations
    and 20 msts), we ran out of disk space even though we had at least 1.5T
    free at the beginning of the query.
    
    The main contributor to the disk bloat is the update statement that we
    run at the end of each hop:
    
    ```
    UPDATE {self.model_output_table}
    SET {self.model_weights_col} = {self.weights_to_update_tbl}.{self.model_weights_col}
    FROM {self.weights_to_update_tbl}
    WHERE {self.model_output_table}.{self.mst_key_col} = {self.weights_to_update_tbl}.{self.mst_key_col}
    ```
    
    In postgres/gpdb, every update command is really two commands, i.e. an
    insert of the new row version plus a delete of the old one. Because of
    this, the space held by the old row versions is not freed; it is only
    reclaimed when vacuum is run regularly, or vacuum full is run at the end.
    We verified this by printing the {self.model_output_table} size for each
    mst_key: it kept growing with each update statement.
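
    For reference, a minimal sketch of how one can watch this bloat (the
    table name here is a placeholder for the generated model output table):

    ```
    -- Under MVCC this size keeps growing after every UPDATE until
    -- VACUUM (or VACUUM FULL) reclaims the dead row versions.
    SELECT pg_size_pretty(pg_total_relation_size('model_output_table'));
    ```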
    
    Also, the disk space for the other intermediate tables created in the
    run_training function never gets cleared, even though we drop these
    tables inside that function. This is because drop/truncate does not
    release disk space inside a pl function: the function runs in a
    subtransaction (so that it can be rolled back), and the space is only
    released once the pl function has completed execution.
    
    The only way to make this work was to add a truncate statement and change
    the gpdb code to do the truncate inside a subtransaction. gpdb 6.5
    introduced a guc
    https://github.com/greenplum-db/gpdb/commit/b4692794a0abd8d8051c23019d26e22f7f3d0aa5
    which, when turned 'on', allows truncate to release the disk space inside
    a subtransaction.
    
    Note that this guc is only available in gpdb 6.5 and up.
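
    As a rough illustration of the guc's effect (assuming gpdb 6.5+, where it
    exists and defaults to 'off'):

    ```
    -- Session-level switch; the tests below assert that it is reset
    -- to 'off' after fit multiple finishes.
    SET dev_opt_unsafe_truncate_in_subtransaction = 'on';
    -- ... a TRUNCATE run inside a pl function's subtransaction now
    -- releases the table's disk space immediately ...
    SET dev_opt_unsafe_truncate_in_subtransaction = 'off';
    ```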
    
    run_training workflow (this function is called per hop):
    1. Join the schedule table with the mst_weights table to do the hop
    2. Call the uda and copy the output to an intermediate table
    3. Update the model table with the results of the previous step
    4. Set the truncate guc to on
    5. Create a temp table from the model table
    6. Truncate the model table to release disk space
    7. Rename the temp table to the model table so that it can be reused for
    the next hop (steps 4-7 are sketched below)
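
    A minimal SQL sketch of steps 4-7, with placeholder table names (the
    actual code generates unique names and goes through madlib's
    rename_table helper rather than a bare ALTER TABLE):

    ```
    SET dev_opt_unsafe_truncate_in_subtransaction = 'on';
    -- copy the model table, keeping its indexes
    CREATE TABLE updated_model (LIKE model_output_table INCLUDING indexes);
    INSERT INTO updated_model SELECT * FROM model_output_table;
    -- truncate releases the disk space immediately; then drop the empty shell
    TRUNCATE TABLE model_output_table;
    DROP TABLE model_output_table;
    -- reuse the original name for the next hop
    ALTER TABLE updated_model RENAME TO model_output_table;
    ```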
    
    Warm Start:
    For warm start, we can't keep calling truncate on the user-passed output
    table, because then we wouldn't be able to roll it back in case of a
    failure. So for warm start, we create a copy of the output table passed
    by the user and operate on the copy. At the end, we drop the original
    output table and rename the copied table to the original table name.
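
    In outline, with a placeholder name for the user's table ('initial_model'
    matches the unique_string prefix used in the code):

    ```
    -- at the start: work on a copy, leaving the user's table untouched
    CREATE TABLE initial_model (LIKE user_output_table INCLUDING indexes);
    INSERT INTO initial_model SELECT * FROM user_output_table;
    -- ... training hops truncate only the copy ...
    -- at the end: swap the copy in for the original
    DROP TABLE user_output_table;
    ALTER TABLE initial_model RENAME TO user_output_table;
    ```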
    
    Co-authored-by: Ekta Khanna <ekha...@pivotal.io>
---
 .../madlib_keras_fit_multiple_model.py_in          | 241 +++++++++++++++------
 .../madlib_keras_fit_multiple_model.sql_in         |   7 +
 .../test/madlib_keras_model_selection.sql_in       |  30 +++
 .../test/madlib_keras_model_selection_e2e.sql_in   |  21 +-
 .../test/madlib_keras_transfer_learning.sql_in     |  71 +++---
 src/ports/postgres/modules/utilities/control.py_in |   3 +-
 .../utilities/test/unit_tests/test_control.py_in   |  12 +-
 7 files changed, 276 insertions(+), 109 deletions(-)

diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in
index a32421b..c6352b5 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.py_in
@@ -34,6 +34,7 @@ from madlib_keras_wrapper import *
 
 from utilities.control import MinWarning
 from utilities.control import OptimizerControl
+from utilities.control import SetGUC
 from utilities.utilities import unique_string
 from utilities.utilities import add_postfix
 from utilities.utilities import rotate
@@ -41,6 +42,8 @@ from utilities.utilities import madlib_version
 from utilities.utilities import is_platform_pg
 from utilities.utilities import get_seg_number
 from utilities.utilities import get_segments_per_host
+from utilities.utilities import rename_table
+
 import json
 from collections import defaultdict
 import random
@@ -88,11 +91,7 @@ class FitMultipleModel():
         self.model_selection_table = model_selection_table
         if self.model_selection_table:
             self.model_selection_summary_table = add_postfix(self.model_selection_table, '_summary')
-        self.model_output_table = model_output_table
-        if self.model_output_table:
-            self.model_info_table = add_postfix(model_output_table, '_info')
-            self.model_summary_table = add_postfix(
-                model_output_table, '_summary')
+
         self.num_iterations = num_iterations
         self.metrics_compute_frequency = metrics_compute_frequency
         self.name = name
@@ -105,8 +104,8 @@ class FitMultipleModel():
         self.compile_params_col = ModelSelectionSchema.COMPILE_PARAMS
         self.fit_params_col = ModelSelectionSchema.FIT_PARAMS
         self.model_arch_table_col = ModelSelectionSchema.MODEL_ARCH_TABLE
-        self.model_weights_col=ModelArchSchema.MODEL_WEIGHTS
-        self.model_arch_col=ModelArchSchema.MODEL_ARCH
+        self.model_weights_col = ModelArchSchema.MODEL_WEIGHTS
+        self.model_arch_col = ModelArchSchema.MODEL_ARCH
         self.train_mst_metric_eval_time = defaultdict(list)
         self.train_mst_loss = defaultdict(list)
         self.train_mst_metric = defaultdict(list)
@@ -116,21 +115,41 @@ class FitMultipleModel():
         self.use_gpus = use_gpus
         self.segments_per_host = get_segments_per_host()
         if self.use_gpus:
-            self.accessible_gpus_for_seg = get_accessible_gpus_for_seg(self.schema_madlib,
-                self.segments_per_host, self.module_name)
+            self.accessible_gpus_for_seg = get_accessible_gpus_for_seg(
+                self.schema_madlib, self.segments_per_host, self.module_name)
         else:
             self.accessible_gpus_for_seg = get_seg_number()*[0]
 
+        self.original_model_output_table = model_output_table
+        if self.original_model_output_table:
+            self.model_info_table = add_postfix(self.original_model_output_table, '_info')
+            self.model_summary_table = add_postfix(
+                self.original_model_output_table, '_summary')
+
+        self.model_output_table = self.original_model_output_table
+
+        """
+        For warm start, we need to copy the model output table to a temp table
+        because we call truncate on the model output table while training.
+        If the query gets aborted, we need to make sure that the user passed
+        model output table can be recovered.
+        """
+        self.warm_start = bool(warm_start)
+        self.warm_start_msts = []
+        if self.warm_start:
+            self.model_output_table = unique_string('initial_model')
+
         self.fit_validator_train = FitMultipleInputValidator(
-            self.source_table, self.validation_table, self.model_output_table,
+            self.source_table, self.validation_table, self.original_model_output_table,
             self.model_selection_table, self.model_selection_summary_table,
             mb_dep_var_col, mb_indep_var_col, self.num_iterations,
             self.model_info_table, self.mst_key_col, self.model_arch_table_col,
-            self.metrics_compute_frequency, warm_start, self.use_gpus,
+            self.metrics_compute_frequency, self.warm_start, self.use_gpus,
             self.accessible_gpus_for_seg)
         if self.metrics_compute_frequency is None:
             self.metrics_compute_frequency = num_iterations
-        self.warm_start = bool(warm_start)
+
+
         self.msts = self.fit_validator_train.msts
         self.model_arch_table = self.fit_validator_train.model_arch_table
         self.metrics_iters = []
@@ -156,16 +175,26 @@ class FitMultipleModel():
         self.dist_keys = query_dist_keys(self.source_table, dist_key_col)
         if len(self.msts) < len(self.dist_keys):
             self.msts_for_schedule = self.msts + [None] * \
-                (len(self.dist_keys) - len(self.msts))
+                                     (len(self.dist_keys) - len(self.msts))
         else:
             self.msts_for_schedule = self.msts
         random.shuffle(self.msts_for_schedule)
         self.grand_schedule = self.generate_schedule(self.msts_for_schedule)
         self.gp_segment_id_col = '0' if is_platform_pg() else GP_SEGMENT_ID_COLNAME
 
-        self.create_model_output_table()
+        if self.warm_start:
+            self.create_model_output_table_warm_start()
+        else:
+            self.create_model_output_table()
+
         self.weights_to_update_tbl = unique_string(desp='weights_to_update')
         self.fit_multiple_model()
+
+        # Update and cleanup metadata tables
+        self.insert_info_table()
+        self.create_model_summary_table()
+        if self.warm_start:
+            self.cleanup_for_warm_start()
         reset_cuda_env(original_cuda_env)
 
     def fit_multiple_model(self):
@@ -175,8 +204,18 @@ class FitMultipleModel():
             self.metrics_elapsed_start_time = time.time()
             self.train_multiple_model()
             self.end_training_time = datetime.datetime.now()
-            self.insert_info_table()
-            self.create_model_summary_table()
+
+    def cleanup_for_warm_start(self):
+        """
+        1. drop original model table
+        2. rename temp to original
+        :return:
+        """
+        drop_query = "DROP TABLE IF EXISTS {}".format(
+            self.original_model_output_table)
+        plpy.execute(drop_query)
+        rename_table(self.schema_madlib, self.model_output_table,
+                     self.original_model_output_table)
 
     def train_multiple_model(self):
         total_msts = len(self.msts_for_schedule)
@@ -187,11 +226,13 @@ class FitMultipleModel():
                 self.create_mst_schedule_table(mst_row)
                 if mst_idx == 0:
                     start_iteration = time.time()
-                self.run_training()
+                self.run_training(mst_idx)
                 if mst_idx == (total_msts - 1):
                     end_iteration = time.time()
-                    self.info_str = "\tTime for training in iteration {0}: {1} sec\n".format(iter,
-                                        end_iteration - start_iteration)
+                    self.info_str = "\tTime for training in iteration " \
+                                    "{0}: {1} sec\n".format(iter,
+                                                            end_iteration -
+                                                            start_iteration)
             if should_compute_metrics_this_iter(iter,
                                                 self.metrics_compute_frequency,
                                                 self.num_iterations):
@@ -218,7 +259,7 @@ class FitMultipleModel():
             self.info_str += "\n\tValidation set after iteration {0}:".format(epoch)
         for mst in self.msts:
             weights = query_weights(self.model_output_table, self.model_weights_col,
-                self.mst_key_col, mst[self.mst_key_col])
+                                    self.mst_key_col, mst[self.mst_key_col])
             model_arch, _ = get_model_arch_weights(self.model_arch_table, mst[self.model_id_col])
             _, metric, loss = compute_loss_and_metrics(
                 self.schema_madlib, table, "$madlib${0}$madlib$".format(
@@ -274,27 +315,44 @@ class FitMultipleModel():
                                 """.format(**locals())
             plpy.execute(mst_insert_query)
 
+
     def create_model_output_table(self):
-        warm_start_msts = []
-        if self.warm_start:
-            plpy.execute(""" DELETE FROM {self.model_output_table}
+        output_table_create_query = """
+                                    CREATE TABLE {self.model_output_table}
+                                    ({self.mst_key_col} INTEGER PRIMARY KEY,
+                                     {self.model_weights_col} BYTEA,
+                                     {self.model_arch_col} JSON)
+                                    """.format(self=self)
+        plpy.execute(output_table_create_query)
+        self.initialize_model_output_and_info()
+
+    def create_model_output_table_warm_start(self):
+        """
+        For warm start, we need to copy the model output table to a temp table
+        because we call truncate on the model output table while training.
+        If the query gets aborted, we need to make sure that the user passed
+        model output table can be recovered.
+        """
+        plpy.execute("""
+            CREATE TABLE {self.model_output_table} (
+            LIKE {self.original_model_output_table} INCLUDING indexes);
+            """.format(self=self))
+
+        plpy.execute("""INSERT INTO {self.model_output_table}
+            SELECT * FROM {self.original_model_output_table};
+            """.format(self=self))
+
+        plpy.execute(""" DELETE FROM {self.model_output_table}
                 WHERE {self.mst_key_col} NOT IN (
                     SELECT {self.mst_key_col} FROM {self.model_selection_table})
                 """.format(self=self))
-            warm_start_msts = plpy.execute(
-                """ SELECT array_agg({0}) AS a FROM {1}
-                """.format(self.mst_key_col, self.model_output_table))[0]['a']
-            plpy.execute("DROP TABLE {0}".format(self.model_info_table))
-
-        else:
-            output_table_create_query = """
-                                        CREATE TABLE {self.model_output_table}
-                                        ({self.mst_key_col} INTEGER PRIMARY KEY,
-                                         {self.model_weights_col} BYTEA,
-                                         {self.model_arch_col} JSON)
-                                        """.format(self=self)
-            plpy.execute(output_table_create_query)
+        self.warm_start_msts = plpy.execute(
+            """ SELECT array_agg({0}) AS a FROM {1}
+            """.format(self.mst_key_col, self.model_output_table))[0]['a']
+        plpy.execute("DROP TABLE {0}".format(self.model_info_table))
+        self.initialize_model_output_and_info()
 
+    def initialize_model_output_and_info(self):
         info_table_create_query = """
                                   CREATE TABLE {self.model_info_table}
                                   ({self.mst_key_col} INTEGER PRIMARY KEY,
@@ -313,7 +371,7 @@ class FitMultipleModel():
                                    validation_loss_final DOUBLE PRECISION,
                                    validation_metrics DOUBLE PRECISION[],
                                    validation_loss DOUBLE PRECISION[])
-                               """.format(self=self)
+                                       """.format(self=self)
 
         plpy.execute(info_table_create_query)
         for mst in self.msts:
@@ -322,23 +380,23 @@ class FitMultipleModel():
 
 
             # If warm start is enabled, weights from transfer learning cannot be
-            # used, even if a particular model doesn't have warm start weigths.
+            # used, even if a particular model doesn't have warm start weights.
             if self.warm_start:
                 model_weights = None
                 mst_filter = """
-                    WHERE {mst_col}={mst_key}
-                """.format(
-                        mst_col=self.mst_key_col,
-                        mst_key=mst['mst_key']
-                    )
- 
+                            WHERE {mst_col}={mst_key}
+                        """.format(
+                    mst_col=self.mst_key_col,
+                    mst_key=mst['mst_key']
+                )
+
             else:
                 mst_filter = ''
 
             serialized_weights = get_initial_weights(self.model_output_table,
                                                      model_arch,
                                                      model_weights,
-                                                     mst['mst_key'] in warm_start_msts,
+                                                     mst['mst_key'] in self.warm_start_msts,
                                                      self.use_gpus,
                                                      self.accessible_gpus_for_seg,
                                                      mst_filter
@@ -352,25 +410,25 @@ class FitMultipleModel():
                 metrics_list) if is_metrics_specified else 'NULL'
 
             info_table_insert_query = """
-                    INSERT INTO {self.model_info_table}({self.mst_key_col},
-                                {self.model_id_col}, {self.compile_params_col},
-                                {self.fit_params_col}, model_type, model_size,
-                                metrics_type)
-                        VALUES ({mst_key_val}, {model_id},
-                                $madlib${compile_params}$madlib$,
-                                $madlib${fit_params}$madlib$, '{model_type}',
-                                {model_size}, {metrics_type})
-                """.format(self=self,
-                           mst_key_val=mst[self.mst_key_col],
-                           model_id=mst[self.model_id_col],
-                           compile_params=mst[self.compile_params_col],
-                           fit_params=mst[self.fit_params_col],
-                           model_type='madlib_keras',
-                           model_size=model_size,
-                           metrics_type=metrics_type)
+                            INSERT INTO {self.model_info_table}({self.mst_key_col},
+                                        {self.model_id_col}, {self.compile_params_col},
+                                        {self.fit_params_col}, model_type, model_size,
+                                        metrics_type)
+                                VALUES ({mst_key_val}, {model_id},
+                                        $madlib${compile_params}$madlib$,
+                                        $madlib${fit_params}$madlib$, '{model_type}',
+                                        {model_size}, {metrics_type})
+                        """.format(self=self,
+                                   mst_key_val=mst[self.mst_key_col],
+                                   model_id=mst[self.model_id_col],
+                                   compile_params=mst[self.compile_params_col],
+                                   fit_params=mst[self.fit_params_col],
+                                   model_type='madlib_keras',
+                                   model_size=model_size,
+                                   metrics_type=metrics_type)
             plpy.execute(info_table_insert_query)
 
-            if not mst['mst_key'] in warm_start_msts:
+            if not mst['mst_key'] in self.warm_start_msts:
                 output_table_insert_query = """
                                     INSERT INTO {self.model_output_table}(
                                         {self.mst_key_col}, {self.model_weights_col},
@@ -484,7 +542,7 @@ class FitMultipleModel():
             if self.validation_table:
                 self.update_info_table(mst, False)
 
-    def run_training(self):
+    def run_training(self, mst_idx):
         # NOTE: In the DL module, we want to avoid CREATING TEMP tables
         # (creates a slice which stays until the session is disconnected)
         # or minimize writing queries that generate plans with Motions (creating
@@ -496,6 +554,7 @@ class FitMultipleModel():
         # Therefore we want to have queries that do not add motions and all the
         # sub-queries running Keras/tensorflow operations reuse the same slice(process)
         # that was used for initializing GPU memory.
+        use_gpus = self.use_gpus if self.use_gpus else False
         mst_weights_query = """
             CREATE TABLE {self.mst_weights_tbl} AS
                 SELECT mst_tbl.*, wgh_tbl.{self.model_weights_col},
@@ -553,7 +612,55 @@ class FitMultipleModel():
             WHERE {self.model_output_table}.{self.mst_key_col} = {self.weights_to_update_tbl}.{self.mst_key_col}
         """.format(self=self)
         plpy.execute(update_query)
-        plpy.execute("DROP TABLE IF EXISTS {0}, {1}, {2}".format(
-                                                        self.mst_weights_tbl,
-                                                        self.mst_current_schedule_tbl,
-                                                        self.weights_to_update_tbl))
+
+        self.truncate_and_drop_tables()
+
+    def truncate_and_drop_tables(self):
+        """
+        Context: UPDATE statements in postgres are not in-place replacements;
+        the row to be updated is marked for deletion (note that the disk space
+        for this row doesn't get released until vacuum is called) and a new row
+        is inserted.
+
+        This function will clear out the disk space used by the model_output_table
+        and also drop all the other intermediate tables.
+        If available, set the `dev_opt_unsafe_truncate_in_subtransaction` guc so
+        that the truncate command can release the disk space. The disk space will
+        be released immediately and hence the model_output table won't grow in
+        size with each UPDATE statement.
+
+        Without this guc, the disk space won't be released and each
+        call to the UPDATE statement will keep adding to the disk space. The disk
+        space will only be released when the query is completed.
+
+        The guc can cause data loss if not used properly. Since truncate will
+        actually clear the disk space immediately, there is no way to recover to
+        the state before truncate was called on that table. So this guc should only
+        be set for intermediate tables and never for tables created outside the
+        scope of the fit_multiple udf.
+
+        Workflow
+        1. Create temp table from model table (including the indexes)
+        2. truncate the model table to release disk space
+        3. rename temp table to model table so that it can be reused for the next
+        hop
+        :return:
+        """
+
+        with SetGUC("dev_opt_unsafe_truncate_in_subtransaction", "on"):
+            temp_model_table = unique_string('updated_model')
+            plpy.execute("""
+            CREATE TABLE {temp_model_table} ( LIKE {self.model_output_table}
+            INCLUDING indexes);""".format(temp_model_table=temp_model_table,
+                                          self=self))
+            plpy.execute("""
+            INSERT INTO {temp_model_table} SELECT * FROM {self.model_output_table};
+            TRUNCATE TABLE {self.model_output_table};
+            DROP TABLE {self.model_output_table};
+            """.format(temp_model_table=temp_model_table, self=self))
+            rename_table(self.schema_madlib, temp_model_table,
+                         self.model_output_table)
+            plpy.execute("""
+            TRUNCATE TABLE {self.mst_weights_tbl}, {self.mst_current_schedule_tbl},
+            {self.weights_to_update_tbl};
+            DROP TABLE IF EXISTS {self.mst_weights_tbl}, {self.mst_current_schedule_tbl},
+            {self.weights_to_update_tbl};""".format(self=self))
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
index df79d86..cd58d93 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_fit_multiple_model.sql_in
@@ -88,6 +88,12 @@ You can set up the models and hyperparameters to try with the
 Model Selection</a> utility to define the unique combinations
 of model architectures, compile and fit parameters.
 
+@note If 'madlib_keras_fit_multiple_model()' is running on GPDB 5, the database will
+keep adding to the disk space (in proportion to model size) and will only
+release the disk space once the fit multiple query has completed execution.
+This is not the case for GPDB 6+ where disk space is released during the
+fit multiple query.
+
 @anchor keras_fit
 @par Fit
 The fit (training) function has the following format:
@@ -120,6 +126,7 @@ madlib_keras_fit_multiple_model(
   <dt>model_output_table</dt>
   <dd>TEXT. Name of the output table containing the
   multiple models created.
+  @note pg_temp is not allowed as an output table schema for fit multiple.
   Details of output tables are shown below.
   </dd>
 
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection.sql_in
index ddf2e0f..d101135 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection.sql_in
@@ -346,6 +346,11 @@ SELECT madlib_keras_fit_multiple_model(
        FALSE
 );
 
+-- The default value of the guc 'dev_opt_unsafe_truncate_in_subtransaction' is 'off'
+-- but we change it to 'on' in fit_multiple.py. Assert that the value is
+-- reset after calling fit_multiple
+select assert_guc_value('dev_opt_unsafe_truncate_in_subtransaction', 'off');
+
 SELECT assert(COUNT(*)=4, 'Info table must have exactly same rows as the number of msts.')
 FROM iris_multiple_model_info;
 
@@ -388,4 +393,29 @@ SELECT assert(
         'Keras Fit Multiple num_clases and class values Validation failed. Actual:' || __to_char(summary))
 FROM (SELECT * FROM iris_multiple_model_summary) summary;
 
+------------- Test for schema qualified output table and input table -----------------------
+CREATE SCHEMA __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__;
+CREATE TABLE __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_data_packed as select * from iris_data_packed;
+CREATE TABLE __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_data_packed_summary as select * from iris_data_packed_summary;
+
+-- do not drop the output table created in the previous test
+SELECT count(*) from iris_multiple_model;
+SELECT madlib_keras_fit_multiple_model(
+       '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_data_packed',
+       '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model',
+       'mst_table_1row',
+       1,
+       FALSE,
+       NULL,
+       1,
+       FALSE
+);
+SELECT count(*) from __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model;
+SELECT assert(
+        num_classes = 3 AND
+        class_values = '{Iris-setosa,Iris-versicolor,NULL}',
+        'Keras Fit Multiple validation failed. Actual:' || __to_char(summary))
+FROM (SELECT * FROM __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model_summary) summary;
+
+DROP SCHEMA __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__ CASCADE;
 !>)
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection_e2e.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection_e2e.sql_in
index 43f08d0..818a013 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection_e2e.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_model_selection_e2e.sql_in
@@ -45,12 +45,13 @@ SELECT load_model_selection_table(
     ]
 );
 
-DROP TABLE if exists pg_temp.iris_multiple_model,
-                     pg_temp.iris_multiple_model_summary,
-                     pg_temp.iris_multiple_model_info;
+CREATE SCHEMA __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__;
+DROP TABLE if exists __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model,
+                     __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model_summary,
+                     __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model_info;
 SELECT madlib_keras_fit_multiple_model(
        'iris_data_packed',
-       'pg_temp.iris_multiple_model',
+       '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model',
        'pg_temp.mst_table',
        3,
        FALSE
@@ -59,9 +60,9 @@ SELECT madlib_keras_fit_multiple_model(
 SELECT assert(
         model_arch_table = 'iris_model_arch' AND
         validation_table is NULL AND
-        model_info = 'pg_temp.iris_multiple_model_info' AND
+        model_info = '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model_info' AND
         source_table = 'iris_data_packed' AND
-        model = 'pg_temp.iris_multiple_model' AND
+        model = '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model' AND
         dependent_varname = 'class_text' AND
         independent_varname = 'attributes' AND
         madlib_version is NOT NULL AND
@@ -73,12 +74,12 @@ SELECT assert(
         dependent_vartype LIKE '%char%' AND
         normalizing_const = 1,
         'Keras Fit Multiple Output Summary Validation failed. Actual:' || __to_char(summary))
-FROM (SELECT * FROM pg_temp.iris_multiple_model_summary) summary;
+FROM (SELECT * FROM __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model_summary) summary;
 
 -- Run Predict
 DROP TABLE IF EXISTS pg_temp.iris_predict;
 SELECT madlib_keras_predict(
-    'pg_temp.iris_multiple_model',
+    '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model',
     'iris_data',
     'id',
     'attributes',
@@ -90,7 +91,7 @@ SELECT madlib_keras_predict(
 -- Run Evaluate
 DROP TABLE IF EXISTS pg_temp.evaluate_out;
 SELECT madlib_keras_evaluate(
-    'pg_temp.iris_multiple_model',
+    '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_multiple_model',
     'iris_data_val',
     'pg_temp.evaluate_out',
     NULL,
@@ -155,4 +156,6 @@ SELECT assert(loss >= 0 AND
         metric >= 0 AND
         metrics_type = '{accuracy}', 'Evaluate output validation failed.  Actual:' || __to_char(evaluate_out))
 FROM evaluate_out;
+
+DROP SCHEMA __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__ CASCADE;
 !>)
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in
index c73fc74..c5c8a93 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras_transfer_learning.sql_in
@@ -25,10 +25,11 @@ m4_include(`SQLCommon.m4')
              `\(.*\)libmadlib\.so',
              `\1../../modules/deep_learning/test/madlib_keras_iris.setup.sql_in'
 )
-
-DROP TABLE IF EXISTS pg_temp.iris_model, pg_temp.iris_model_summary;
+-------------- Warm start test (along with schema qualified output table) -------------------------
+CREATE SCHEMA __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__;
+DROP TABLE IF EXISTS iris_model, iris_model_summary;
 SELECT madlib_keras_fit('iris_data_packed',   -- source table
-                        'pg_temp.iris_model',          -- model output table
+                        '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model',          -- model output table
                         'iris_model_arch',  -- model arch table
                          1,                    -- model arch id
                          $$ loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] $$,  -- compile_params
@@ -45,43 +46,44 @@ SELECT assert(
   array_upper(training_loss, 1) = 5 AND
   array_upper(training_metrics, 1) = 5,
   'metrics compute frequency must be 1.')
-FROM pg_temp.iris_model_summary;
+FROM __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model_summary;
 
 SELECT assert(
   training_loss[5]-training_loss[1] < 0.1 AND
   training_metrics[5]-training_metrics[1] > -0.1,
     'The loss and accuracy should have improved with more iterations.'
 )
-FROM pg_temp.iris_model_summary;
+FROM __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model_summary;
 
 -- Make a copy of the loss and metrics array, to compare it with runs after
 -- warm start and transfer learning.
 DROP TABLE IF EXISTS iris_model_first_run;
 CREATE TABLE iris_model_first_run AS
 SELECT training_loss_final, training_metrics_final
-FROM pg_temp.iris_model_summary;
+FROM __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model_summary;
 
 -- Copy weights that were learnt from the previous run, for transfer
 -- learning. Copy it now, because using warm_start will overwrite it.
-UPDATE iris_model_arch set model_weights = (select model_weights from iris_model)  WHERE model_id = 2;
+UPDATE iris_model_arch set model_weights = (select model_weights from __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model)
+WHERE model_id = 2;
 
 -- Warm start test
 SELECT madlib_keras_fit('iris_data_packed',   -- source table
-                       'pg_temp.iris_model',          -- model output table
+                       '__MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model',         -- model output table
                        'iris_model_arch',  -- model arch table
                         2,                    -- model arch id
                         $$ loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] $$,  -- compile_params
                         $$ batch_size=5, epochs=3 $$,  -- fit_params
                         2,                    -- num_iterations,
                         NULL, NULL, 1,
-                        true -- warm start
+                        TRUE -- warm start
                       );
 
 SELECT assert(
   array_upper(training_loss, 1) = 2 AND
   array_upper(training_metrics, 1) = 2,
   'metrics compute frequency must be 1.')
-FROM pg_temp.iris_model_summary;
+FROM __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model_summary;
 
 SELECT assert(
   abs(first.training_loss_final-second.training_loss[1]) < 1e-6 AND
@@ -89,12 +91,16 @@ SELECT assert(
   abs(first.training_metrics_final-second.training_metrics[1]) < 1e-10 AND
   abs(first.training_metrics_final-second.training_metrics[2]) < 1e-10,
   'warm start test failed because training loss and metrics don''t match the expected value from the previous run of keras fit.')
-FROM iris_model_first_run AS first, pg_temp.iris_model_summary AS second;
+FROM iris_model_first_run AS first, __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__.iris_model_summary AS second;
+
+DROP SCHEMA __MADLIB__DEEP_LEARNING_SCHEMA__MADLIB__ CASCADE;
 
--- Transfer learning test
+---------------- end Warm start test ----------------------------------------------------
+
+---------------- Transfer learning test ----------------------------------------------------
 DROP TABLE IF EXISTS iris_model_transfer, iris_model_transfer_summary;
 SELECT madlib_keras_fit('iris_data_packed',   -- source table
-                       'pg_temp.iris_model_transfer',          -- model output table
+                       'iris_model_transfer',          -- model output table
                        'iris_model_arch',  -- model arch table
                         2,                    -- model arch id
                         $$ loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] $$,  -- compile_params
@@ -107,7 +113,7 @@ SELECT assert(
   array_upper(training_loss, 1) = 2 AND
   array_upper(training_metrics, 1) = 2,
   'metrics compute frequency must be 1.')
-FROM pg_temp.iris_model_transfer_summary;
+FROM iris_model_transfer_summary;
 
 SELECT assert(
   abs(first.training_loss_final-second.training_loss[1]) < 1e-6 AND
@@ -115,7 +121,8 @@ SELECT assert(
   abs(first.training_metrics_final-second.training_metrics[1]) < 1e-10 AND
   abs(first.training_metrics_final-second.training_metrics[2]) < 1e-10,
   'Transfer learning test failed because training loss and metrics don''t match the expected value.')
-FROM iris_model_first_run AS first, iris_model_transfer_summary AS second;
+FROM iris_model_first_run AS first,
+iris_model_transfer_summary AS second;
 
 -- Rerun the iris setup to discard the changes
 \i m4_regexp(MODULE_PATHNAME,
@@ -139,11 +146,11 @@ SELECT load_model_selection_table(
     ]
 );
 
-DROP TABLE if exists pg_temp.iris_multiple_model, pg_temp.iris_multiple_model_summary, pg_temp.iris_multiple_model_info;
+DROP TABLE if exists iris_multiple_model, iris_multiple_model_summary, iris_multiple_model_info;
 SELECT setseed(0);
 SELECT madlib_keras_fit_multiple_model(
   'iris_data_packed',
-  'pg_temp.iris_multiple_model',
+  'iris_multiple_model',
   'mst_table',
   3,
   FALSE, NULL, 1
@@ -153,12 +160,12 @@ DROP TABLE IF EXISTS iris_model_first_run;
 CREATE TABLE iris_model_first_run AS
 SELECT mst_key, model_id, training_loss, training_metrics,
     training_loss_final, training_metrics_final
-FROM pg_temp.iris_multiple_model_info;
+FROM iris_multiple_model_info;
 
 -- warm start for fit multiple model
 SELECT madlib_keras_fit_multiple_model(
   'iris_data_packed',
-  'pg_temp.iris_multiple_model',
+  'iris_multiple_model',
   'mst_table',
   3,
   FALSE,
@@ -170,7 +177,7 @@ SELECT assert(
   array_upper(training_loss, 1) = 3 AND
   array_upper(training_metrics, 1) = 3,
   'metrics compute frequency must be 1.')
-FROM pg_temp.iris_multiple_model_info;
+FROM iris_multiple_model_info;
 
 SELECT assert(
   abs(first.training_loss_final-second.training_loss[1]) < 1e-6 AND
@@ -178,7 +185,7 @@ SELECT assert(
   abs(first.training_metrics_final-second.training_metrics[1]) < 1e-10 AND
   abs(first.training_metrics_final-second.training_metrics[2]) < 1e-10,
   'warm start test failed because training loss and metrics don''t match the expected value from the previous run of keras fit.')
-FROM iris_model_first_run AS first, pg_temp.iris_multiple_model_info AS second
+FROM iris_model_first_run AS first, iris_multiple_model_info AS second
 WHERE first.mst_key = second.mst_key AND first.model_id = 2;
 
 -- warm start with different mst tables
@@ -244,6 +251,11 @@ SELECT madlib_keras_fit_multiple_model(
   NULL, 1,
   TRUE -- warm_start
 );
+-- The default value of the guc 'dev_opt_unsafe_truncate_in_subtransaction' is 'off'
+-- but we change it to 'on' in fit_multiple.py. Assert that the value is
+-- reset after calling fit_multiple
+select assert_guc_value('dev_opt_unsafe_truncate_in_subtransaction', 'off');
+
 
 SELECT assert(
   5 IN (SELECT mst_key FROM iris_multiple_model),
@@ -298,30 +310,31 @@ SELECT load_model_selection_table(
     ]
 );
 
-DROP TABLE if exists pg_temp.iris_multiple_model, pg_temp.iris_multiple_model_summary, pg_temp.iris_multiple_model_info;
+-- TODO we need to drop iris_multiple_model as well as iris_multiple_model
+DROP TABLE if exists iris_multiple_model, iris_multiple_model_summary, iris_multiple_model_info;
 SELECT setseed(0);
 SELECT madlib_keras_fit_multiple_model(
   'iris_data_packed',
-  'pg_temp.iris_multiple_model',
+  'iris_multiple_model',
   'mst_table',
   3,
   FALSE, NULL, 1
 );
 
 UPDATE iris_model_arch
-SET model_weights = (SELECT model_weights FROM pg_temp.iris_multiple_model WHERE mst_key=1)
+SET model_weights = (SELECT model_weights FROM iris_multiple_model WHERE mst_key=1)
 WHERE model_id = 1;
 
 DROP TABLE IF EXISTS iris_model_first_run;
 CREATE TABLE iris_model_first_run AS
 SELECT mst_key, model_id, training_loss, training_metrics,
     training_loss_final, training_metrics_final
-FROM pg_temp.iris_multiple_model_info;
+FROM iris_multiple_model_info;
 
-DROP TABLE if exists pg_temp.iris_multiple_model, pg_temp.iris_multiple_model_summary, pg_temp.iris_multiple_model_info;
+DROP TABLE if exists iris_multiple_model, iris_multiple_model_summary, iris_multiple_model_info;
 SELECT madlib_keras_fit_multiple_model(
   'iris_data_packed',
-  'pg_temp.iris_multiple_model',
+  'iris_multiple_model',
   'mst_table',
   3,
   FALSE, NULL, 1
@@ -330,6 +343,8 @@ SELECT madlib_keras_fit_multiple_model(
 SELECT assert(
   (first.training_loss_final-second.training_loss_final) > 1e-6,
   'Transfer learning test failed because training loss and metrics don''t match the expected value.')
-FROM iris_model_first_run AS first, pg_temp.iris_multiple_model_info AS second
+FROM iris_model_first_run AS first, iris_multiple_model_info AS second
 WHERE first.mst_key = second.mst_key AND first.model_id = 1;
+
 !>)
+
diff --git a/src/ports/postgres/modules/utilities/control.py_in b/src/ports/postgres/modules/utilities/control.py_in
index b52a881..32f2f59 100644
--- a/src/ports/postgres/modules/utilities/control.py_in
+++ b/src/ports/postgres/modules/utilities/control.py_in
@@ -74,7 +74,8 @@ class SetGUC(ContextDecorator):
                 return self
 
             if self.new_guc_value:
-                plpy.execute("set {0}={1}".format(self.guc_name, self.new_guc_value))
+                plpy.execute("set {0}='{1}'".format(self.guc_name,
+                                                    self.new_guc_value))
             else:
                 if self.error_on_fail:
                     plpy.error("Cannot set {0} to None. Please provide a valid value"
diff --git a/src/ports/postgres/modules/utilities/test/unit_tests/test_control.py_in b/src/ports/postgres/modules/utilities/test/unit_tests/test_control.py_in
index 2d43968..5e76f78 100644
--- a/src/ports/postgres/modules/utilities/test/unit_tests/test_control.py_in
+++ b/src/ports/postgres/modules/utilities/test/unit_tests/test_control.py_in
@@ -94,18 +94,22 @@ class SetGUCTestCase(unittest.TestCase):
         self.module_patcher.stop()
 
     def test_set_guc_sets_new_value(self):
-        self.plpy_mock_execute.return_value = [{'foo': 'new_bar'}]
+        self.plpy_mock_execute.return_value = [{'foo': 'old_bar'}]
         with self.subject.SetGUC("foo", "new_bar") as C:
             self.assertTrue("new_bar", C.new_guc_value)
-        self.plpy_mock_execute.assert_called_with(
-            "set foo='new_bar'")
+        self.assertEqual(3, self.plpy_mock_execute.call_count)
+        self.plpy_mock_execute.assert_has_calls([call('show foo'),
+                                                 call("set foo='new_bar'"),
+                                                 call("set foo='old_bar'")])
+
 
     def test_set_guc_missing(self):
         self.plpy_mock_execute.side_effect = plpy.SPIError(
             'Unrecognized configuration parameter "foo"')
         with self.subject.SetGUC("foo", "new_bar") as C:
             self.assertFalse(C.guc_exists)
-
+        self.assertEqual(1, self.plpy_mock_execute.call_count)
+        self.plpy_mock_execute.assert_has_calls([call('show foo')])
 
 if __name__ == '__main__':
     unittest.main()
