Repository: incubator-hawq
Updated Branches:
  refs/heads/master d2d4ae84c -> 157a6699d


HAWQ-420. Memory leaks in DataLocality during ANALYZE lasting 1~2 days on a 100-node cluster.


Project: http://git-wip-us.apache.org/repos/asf/incubator-hawq/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-hawq/commit/157a6699
Tree: http://git-wip-us.apache.org/repos/asf/incubator-hawq/tree/157a6699
Diff: http://git-wip-us.apache.org/repos/asf/incubator-hawq/diff/157a6699

Branch: refs/heads/master
Commit: 157a6699de14780fbc5967a67794fe4892629588
Parents: d2d4ae8
Author: hubertzhang <hzh...@pivotal.io>
Authored: Thu Feb 18 17:31:19 2016 +0800
Committer: hubertzhang <hzh...@pivotal.io>
Committed: Fri Feb 19 10:32:12 2016 +0800

----------------------------------------------------------------------
 src/backend/cdb/cdbdatalocality.c | 531 +++++++++++++++++----------------
 1 file changed, 271 insertions(+), 260 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/157a6699/src/backend/cdb/cdbdatalocality.c
----------------------------------------------------------------------
diff --git a/src/backend/cdb/cdbdatalocality.c b/src/backend/cdb/cdbdatalocality.c
index 6fc04ae..72f58a0 100644
--- a/src/backend/cdb/cdbdatalocality.c
+++ b/src/backend/cdb/cdbdatalocality.c
@@ -3950,7 +3950,9 @@ static void cleanup_allocation_algorithm(
                }
        }
 
-       MemoryContextResetAndDeleteChildren(context->datalocality_memorycontext);
+       if(DataLocalityMemoryContext){
+         MemoryContextResetAndDeleteChildren(DataLocalityMemoryContext);
+       }
 
        return;
 }
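
[Editor's note] The first hunk replaces the reset of the per-call context pointer with a NULL-guarded reset of the module-level DataLocalityMemoryContext, so cleanup stays safe on paths where the context was never created. A minimal sketch of the same pattern follows; the body of init_datalocality_memory_context() here is an assumption for illustration (the real one lives elsewhere in cdbdatalocality.c), and the AllocSetContextCreate call uses the pre-9.6 min/init/max-size signature that HAWQ's PostgreSQL base provides.

    /* Sketch only: guarded creation and reset of a module-level memory context. */
    #include "postgres.h"
    #include "utils/memutils.h"

    static MemoryContext DataLocalityMemoryContext = NULL;

    static void
    init_datalocality_memory_context(void)
    {
            /* Create the context lazily, parented to TopMemoryContext. */
            if (DataLocalityMemoryContext == NULL)
                    DataLocalityMemoryContext = AllocSetContextCreate(TopMemoryContext,
                                                                      "DataLocalityMemoryContext",
                                                                      ALLOCSET_DEFAULT_MINSIZE,
                                                                      ALLOCSET_DEFAULT_INITSIZE,
                                                                      ALLOCSET_DEFAULT_MAXSIZE);
    }

    static void
    reset_datalocality_memory_context(void)
    {
            /* Guard against a NULL context so cleanup can run on any error path. */
            if (DataLocalityMemoryContext)
                    MemoryContextResetAndDeleteChildren(DataLocalityMemoryContext);
    }
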
@@ -3997,306 +3999,315 @@ calculate_planner_segment_num(Query *query, QueryResourceLife resourceLife,
                return result;
        }
 
-       init_datalocality_memory_context();
-
-       init_datalocality_context(&context);
-
-       /*
-        * Initialize QueryResourceParameters in QD
-        *
-        * We use CacheMemoryContext/TopMemoryContext here so that the
-        * QueryResourceParameter can be available in the session. Thus,
-        * it can be used in multiple "EXECUTION"s of the prepared
-        * statement (i.e., "PREPARE", "BIND", "EXECUTION").
-        */
-       MemoryContext oldcontext = NULL;
-       if ( CacheMemoryContext != NULL )
+       PG_TRY();
        {
-               oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
-       }
-       else
-       {
-               oldcontext = MemoryContextSwitchTo(TopMemoryContext);
-       }
-       resource_parameters = (QueryResourceParameters *)palloc(sizeof(QueryResourceParameters));
-       MemoryContextSwitchTo(oldcontext);
+               init_datalocality_memory_context();
 
-       collect_range_tables(query, fullRangeTable, &(context.rtc_context));
+               init_datalocality_context(&context);
 
-       bool isTableFunctionExists = false;
+               /*
+                * Initialize QueryResourceParameters in QD
+                *
+                * We use CacheMemoryContext/TopMemoryContext here so that the
+                * QueryResourceParameter can be available in the session. Thus,
+                * it can be used in multiple "EXECUTION"s of the prepared
+                * statement (i.e., "PREPARE", "BIND", "EXECUTION").
+                */
+               MemoryContext oldcontext = NULL;
+               if ( CacheMemoryContext != NULL )
+               {
+                       oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
+               }
+               else
+               {
+                       oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+               }
+               resource_parameters = (QueryResourceParameters *)palloc(sizeof(QueryResourceParameters));
+               MemoryContextSwitchTo(oldcontext);
 
-       /*
-        * the number of virtual segments is determined by 5 factors:
-        * 1 bucket number of external table
-        * 2 whether function exists
-        * 3 bucket number of hash result relation
-        * 4 bucket number of hash "from" relation
-        * 5 data size of random "from" relation
-        */
+               collect_range_tables(query, fullRangeTable, &(context.rtc_context));
 
-       /*convert range table list to oid list and check whether table function exists
-        *we keep a full range table list and a range table list without result relation separately
-        */
-       convert_range_tables_to_oids_and_check_table_functions(
-                       &(context.rtc_context.full_range_tables), &isTableFunctionExists,
-                       context.datalocality_memorycontext);
-       convert_range_tables_to_oids_and_check_table_functions(
-                       &(context.rtc_context.range_tables), &isTableFunctionExists,
-                       context.datalocality_memorycontext);
-
-       /*Table Function VSeg Number = default_segment_number(configured in GUC) if table function exists,
-        *0 Otherwise.
-        */
-       if (isTableFunctionExists) {
-               context.tableFuncSegNum = default_segment_num;
-       }
+               bool isTableFunctionExists = false;
 
-       /* set expected virtual segment number for hash table and external table*/
-       /* calculate hashSegNum, externTableSegNum, resultRelationHashSegNum */
-       check_keep_hash_and_external_table(&context, query, intoPolicy);
-
-       /* get block location and calculate relation size*/
-       get_block_locations_and_claculte_table_size(&context);
-
-       /*use inherit resource*/
-       if (resourceLife == QRL_INHERIT) {
-               resource = AllocateResource(resourceLife, sliceNum, 0, 0, 0, NULL, 0);
-
-               saveQueryResourceParameters(
-                       resource_parameters,  /* resource_parameters */
-                       resourceLife,         /* life */
-                       sliceNum,             /* slice_size */
-                       0,                    /* iobytes */
-                       0,                    /* max_target_segment_num */
-                       0,                    /* min_target_segment_num */
-                       NULL,                 /* vol_info */
-                       0                     /* vol_info_size */ 
-                       );
-
-               if (resource != NULL) {
-                       if ((context.keep_hash)
-                                       && (list_length(resource->segments) != context.hashSegNum)) {
-                               context.keep_hash = false;
-                       }
-               }
-       }
+               /*
+                * the number of virtual segments is determined by 5 factors:
+                * 1 bucket number of external table
+                * 2 whether function exists
+                * 3 bucket number of hash result relation
+                * 4 bucket number of hash "from" relation
+                * 5 data size of random "from" relation
+                */
 
-       /*allocate new resource*/
-       if (((resourceLife == QRL_INHERIT) && (resource == NULL))
-                       || (resourceLife == QRL_ONCE) || (resourceLife == QRL_NONE)) {
-               /*generate hostname-volumn pair to help RM to choose a host with
-                *maximum data locality(only when the vseg number less than host number)
+               /*convert range table list to oid list and check whether table function exists
+                *we keep a full range table list and a range table list without result relation separately
                 */
-               if(enable_prefer_list_to_rm){
-                       context.host_context.size = context.dds_context.size;
-                       MemoryContextSwitchTo(context.datalocality_memorycontext);
-                       context.host_context.hostnameVolInfos = (HostnameVolumnInfo *) palloc(
-                                       sizeof(HostnameVolumnInfo) * context.host_context.size);
-                       for (int i = 0; i < context.host_context.size; i++) {
-                               MemSet(&(context.host_context.hostnameVolInfos[i].hostname), 0,
-                                               HOSTNAME_MAX_LENGTH);
-                               strncpy(context.host_context.hostnameVolInfos[i].hostname,
-                                               context.dds_context.volInfos[i].hashEntry->key.hostname,
-                                               HOSTNAME_MAX_LENGTH-1);
-                               context.host_context.hostnameVolInfos[i].datavolumn = context.dds_context.volInfos[i].datavolumn;
-                       }
-                       MemoryContextSwitchTo(context.old_memorycontext);
-               }else{
-                       context.host_context.size = 0;
-                       context.host_context.hostnameVolInfos = NULL;
+               convert_range_tables_to_oids_and_check_table_functions(
+                               &(context.rtc_context.full_range_tables), &isTableFunctionExists,
+                               context.datalocality_memorycontext);
+               convert_range_tables_to_oids_and_check_table_functions(
+                               &(context.rtc_context.range_tables), &isTableFunctionExists,
+                               context.datalocality_memorycontext);
+
+               /*Table Function VSeg Number = default_segment_number(configured in GUC) if table function exists,
+                *0 Otherwise.
+                */
+               if (isTableFunctionExists) {
+                       context.tableFuncSegNum = default_segment_num;
                }
 
-               /* determine the random table segment number by the following 4 steps*/
-               /* Step1 we expect one split(block) processed by one virtual segment*/
-               context.randomSegNum = context.total_split_count;
-               /* Step2 combine segment when splits are with small size*/
-               int64 min_split_size = min_datasize_to_combine_segment; /*default 128M*/
-               min_split_size <<= 20;
-               int expected_segment_num_with_minsize = (context.total_size + min_split_size - 1)
-                               / min_split_size;
-               if (context.randomSegNum > expected_segment_num_with_minsize) {
-                       context.randomSegNum = expected_segment_num_with_minsize;
-               }
-               /* Step3 split segment when there are tow many files (default add one more segment per 100(guc) files)*/
-               int expected_segment_num_with_max_filecount = (context.total_file_count
-                               + max_filecount_notto_split_segment - 1)
-                               / max_filecount_notto_split_segment;
-               if (context.randomSegNum < expected_segment_num_with_max_filecount) {
-                       context.randomSegNum = expected_segment_num_with_max_filecount;
-               }
-               /* Step4 we at least use one segment*/
-               if (context.randomSegNum < minimum_segment_num) {
-                       context.randomSegNum = minimum_segment_num;
-               }
+               /* set expected virtual segment number for hash table and external table*/
+               /* calculate hashSegNum, externTableSegNum, resultRelationHashSegNum */
+               check_keep_hash_and_external_table(&context, query, intoPolicy);
 
-               int maxExpectedNonRandomSegNum = 0;
-               if (maxExpectedNonRandomSegNum < context.externTableSegNum)
-                       maxExpectedNonRandomSegNum = context.externTableSegNum;
-               if (maxExpectedNonRandomSegNum < context.tableFuncSegNum)
-                       maxExpectedNonRandomSegNum = context.tableFuncSegNum;
-               if (maxExpectedNonRandomSegNum < context.hashSegNum)
-                       maxExpectedNonRandomSegNum = context.hashSegNum;
+               /* get block location and calculate relation size*/
+               get_block_locations_and_claculte_table_size(&context);
 
-               if (debug_fake_segmentnum){
-                       fpsegnum = fopen("/tmp/segmentnumber", "w+");
-                       fprintf(fpsegnum, "Default segment num : %d.\n", default_segment_num);
-                       fprintf(fpsegnum, "\n");
-                       fprintf(fpsegnum, "From random relation segment num : %d.\n", context.randomSegNum);
-                       fprintf(fpsegnum, "Result relation hash segment num : %d.\n", context.resultRelationHashSegNum);
-                       fprintf(fpsegnum, "\n");
-                       fprintf(fpsegnum, "Table  function      segment num : %d.\n", context.tableFuncSegNum);
-                       fprintf(fpsegnum, "Extern table         segment num : %d.\n", context.externTableSegNum);
-                       fprintf(fpsegnum, "From hash relation   segment num : %d.\n", context.hashSegNum);
-                       fprintf(fpsegnum, "MaxExpectedNonRandom segment num : %d.\n", maxExpectedNonRandomSegNum);
-                       fprintf(fpsegnum, "\n");
+               /*use inherit resource*/
+               if (resourceLife == QRL_INHERIT) {
+                       resource = AllocateResource(resourceLife, sliceNum, 0, 0, 0, NULL, 0);
+
+                       saveQueryResourceParameters(
+                                                       resource_parameters,  /* resource_parameters */
+                                                       resourceLife,         /* life */
+                                                       sliceNum,             /* slice_size */
+                                                       0,                    /* iobytes */
+                                                       0,                    /* max_target_segment_num */
+                                                       0,                    /* min_target_segment_num */
+                                                       NULL,                 /* vol_info */
+                                                       0                     /* vol_info_size */
+                                                       );
+
+                       if (resource != NULL) {
+                               if ((context.keep_hash)
+                                               && (list_length(resource->segments) != context.hashSegNum)) {
+                                       context.keep_hash = false;
+                               }
+                       }
                }
 
-               int minTargetSegmentNumber = 0;
-               int maxTargetSegmentNumber = 0;
-               /* we keep resultRelationHashSegNum in the highest priority*/
-               if (context.resultRelationHashSegNum != 0) {
-                       if (context.resultRelationHashSegNum < context.externTableSegNum
-                                       && context.externTableSegNum != 0) {
-                               cleanup_allocation_algorithm(&context);
-                               elog(ERROR, "Could not allocate enough memory! "
-                                               "bucket number of result hash table and external table should match each other");
+               /*allocate new resource*/
+               if (((resourceLife == QRL_INHERIT) && (resource == NULL))
+                               || (resourceLife == QRL_ONCE) || (resourceLife == QRL_NONE)) {
+                       /*generate hostname-volumn pair to help RM to choose a host with
+                        *maximum data locality(only when the vseg number less than host number)
+                        */
+                       if(enable_prefer_list_to_rm){
+                               context.host_context.size = context.dds_context.size;
+                               MemoryContextSwitchTo(context.datalocality_memorycontext);
+                               context.host_context.hostnameVolInfos = (HostnameVolumnInfo *) palloc(
+                                               sizeof(HostnameVolumnInfo) * context.host_context.size);
+                               for (int i = 0; i < context.host_context.size; i++) {
+                                       MemSet(&(context.host_context.hostnameVolInfos[i].hostname), 0,
+                                                       HOSTNAME_MAX_LENGTH);
+                                       strncpy(context.host_context.hostnameVolInfos[i].hostname,
+                                                       context.dds_context.volInfos[i].hashEntry->key.hostname,
+                                                       HOSTNAME_MAX_LENGTH-1);
+                                       context.host_context.hostnameVolInfos[i].datavolumn = context.dds_context.volInfos[i].datavolumn;
+                               }
+                               MemoryContextSwitchTo(context.old_memorycontext);
+                       }else{
+                               context.host_context.size = 0;
+                               context.host_context.hostnameVolInfos = NULL;
+                       }
+
+                       /* determine the random table segment number by the following 4 steps*/
+                       /* Step1 we expect one split(block) processed by one virtual segment*/
+                       context.randomSegNum = context.total_split_count;
+                       /* Step2 combine segment when splits are with small size*/
+                       int64 min_split_size = min_datasize_to_combine_segment; /*default 128M*/
+                       min_split_size <<= 20;
+                       int expected_segment_num_with_minsize = (context.total_size + min_split_size - 1)
+                                       / min_split_size;
+                       if (context.randomSegNum > expected_segment_num_with_minsize) {
+                               context.randomSegNum = expected_segment_num_with_minsize;
                        }
-                       maxTargetSegmentNumber = context.resultRelationHashSegNum;
-                       minTargetSegmentNumber = context.resultRelationHashSegNum;
-               } else if (maxExpectedNonRandomSegNum > 0) {
-                       /* bucket number of external table must be the same with the number of virtual segments*/
-                       if (maxExpectedNonRandomSegNum == context.externTableSegNum) {
-                               context.externTableSegNum =
-                                               context.externTableSegNum < minimum_segment_num ?
-                                                               minimum_segment_num : context.externTableSegNum;
-                               maxTargetSegmentNumber = context.externTableSegNum;
-                               minTargetSegmentNumber = context.externTableSegNum;
-                       } else if (maxExpectedNonRandomSegNum == context.hashSegNum) {
-                               /* in general, we keep bucket number of hash table equals to the number of virtual segments
-                                * but this rule can be broken when there is a large random table in the range tables list
-                                */
-                               context.hashSegNum =
-                                               context.hashSegNum < minimum_segment_num ?
-                                               minimum_segment_num : context.hashSegNum;
-                               double considerRandomWhenHashExistRatio = 1.5;
-                               /*if size of random table >1.5 *hash table, we consider relax the restriction of hash bucket number*/
-                               if (context.randomRelSize
-                                               > considerRandomWhenHashExistRatio * context.hashRelSize) {
-                                       if (context.randomSegNum < context.hashSegNum) {
-                                               context.randomSegNum = context.hashSegNum;
+                       /* Step3 split segment when there are tow many files (default add one more segment per 100(guc) files)*/
+                       int expected_segment_num_with_max_filecount = (context.total_file_count
+                                       + max_filecount_notto_split_segment - 1)
+                                       / max_filecount_notto_split_segment;
+                       if (context.randomSegNum < expected_segment_num_with_max_filecount) {
+                               context.randomSegNum = expected_segment_num_with_max_filecount;
+                       }
+                       /* Step4 we at least use one segment*/
+                       if (context.randomSegNum < minimum_segment_num) {
+                               context.randomSegNum = minimum_segment_num;
+                       }
+
+                       int maxExpectedNonRandomSegNum = 0;
+                       if (maxExpectedNonRandomSegNum < context.externTableSegNum)
+                               maxExpectedNonRandomSegNum = context.externTableSegNum;
+                       if (maxExpectedNonRandomSegNum < context.tableFuncSegNum)
+                               maxExpectedNonRandomSegNum = context.tableFuncSegNum;
+                       if (maxExpectedNonRandomSegNum < context.hashSegNum)
+                               maxExpectedNonRandomSegNum = context.hashSegNum;
+
+                       if (debug_fake_segmentnum){
+                               fpsegnum = fopen("/tmp/segmentnumber", "w+");
+                               fprintf(fpsegnum, "Default segment num : %d.\n", default_segment_num);
+                               fprintf(fpsegnum, "\n");
+                               fprintf(fpsegnum, "From random relation segment num : %d.\n", context.randomSegNum);
+                               fprintf(fpsegnum, "Result relation hash segment num : %d.\n", context.resultRelationHashSegNum);
+                               fprintf(fpsegnum, "\n");
+                               fprintf(fpsegnum, "Table  function      segment num : %d.\n", context.tableFuncSegNum);
+                               fprintf(fpsegnum, "Extern table         segment num : %d.\n", context.externTableSegNum);
+                               fprintf(fpsegnum, "From hash relation   segment num : %d.\n", context.hashSegNum);
+                               fprintf(fpsegnum, "MaxExpectedNonRandom segment num : %d.\n", maxExpectedNonRandomSegNum);
+                               fprintf(fpsegnum, "\n");
+                       }
+
+                       int minTargetSegmentNumber = 0;
+                       int maxTargetSegmentNumber = 0;
+                       /* we keep resultRelationHashSegNum in the highest priority*/
+                       if (context.resultRelationHashSegNum != 0) {
+                               if (context.resultRelationHashSegNum < context.externTableSegNum
+                                               && context.externTableSegNum != 0) {
+                                       cleanup_allocation_algorithm(&context);
+                                       elog(ERROR, "Could not allocate enough memory! "
+                                                       "bucket number of result hash table and external table should match each other");
+                               }
+                               maxTargetSegmentNumber = context.resultRelationHashSegNum;
+                               minTargetSegmentNumber = context.resultRelationHashSegNum;
+                       } else if (maxExpectedNonRandomSegNum > 0) {
+                               /* bucket number of external table must be the same with the number of virtual segments*/
+                               if (maxExpectedNonRandomSegNum == context.externTableSegNum) {
+                                       context.externTableSegNum =
+                                                       context.externTableSegNum < minimum_segment_num ?
+                                                                       minimum_segment_num : context.externTableSegNum;
+                                       maxTargetSegmentNumber = context.externTableSegNum;
+                                       minTargetSegmentNumber = context.externTableSegNum;
+                               } else if (maxExpectedNonRandomSegNum == context.hashSegNum) {
+                                       /* in general, we keep bucket number of hash table equals to the number of virtual segments
+                                        * but this rule can be broken when there is a large random table in the range tables list
+                                        */
+                                       context.hashSegNum =
+                                                       context.hashSegNum < minimum_segment_num ?
+                                                       minimum_segment_num : context.hashSegNum;
+                                       double considerRandomWhenHashExistRatio = 1.5;
+                                       /*if size of random table >1.5 *hash table, we consider relax the restriction of hash bucket number*/
+                                       if (context.randomRelSize
+                                                       > considerRandomWhenHashExistRatio * context.hashRelSize) {
+                                               if (context.randomSegNum < context.hashSegNum) {
+                                                       context.randomSegNum = context.hashSegNum;
+                                               }
+                                               maxTargetSegmentNumber = context.randomSegNum;
+                                               minTargetSegmentNumber = minimum_segment_num;
+                                       } else {
+                                               maxTargetSegmentNumber = context.hashSegNum;
+                                               minTargetSegmentNumber = context.hashSegNum;
+                                       }
+                               } else if (maxExpectedNonRandomSegNum == context.tableFuncSegNum) {
+                                       /* if there is a table function, we should at least use tableFuncSegNum virtual segments*/
+                                       context.tableFuncSegNum =
+                                                       context.tableFuncSegNum < minimum_segment_num ?
+                                                                       minimum_segment_num : context.tableFuncSegNum;
+                                       if (context.randomSegNum < context.tableFuncSegNum) {
+                                               context.randomSegNum = context.tableFuncSegNum;
                                        }
                                        maxTargetSegmentNumber = context.randomSegNum;
                                        minTargetSegmentNumber = minimum_segment_num;
-                               } else {
-                                       maxTargetSegmentNumber = context.hashSegNum;
-                                       minTargetSegmentNumber = context.hashSegNum;
-                               }
-                       } else if (maxExpectedNonRandomSegNum == context.tableFuncSegNum) {
-                               /* if there is a table function, we should at least use tableFuncSegNum virtual segments*/
-                               context.tableFuncSegNum =
-                                               context.tableFuncSegNum < minimum_segment_num ?
-                                                               minimum_segment_num : context.tableFuncSegNum;
-                               if (context.randomSegNum < context.tableFuncSegNum) {
-                                       context.randomSegNum = context.tableFuncSegNum;
                                }
+                       } else {
                                maxTargetSegmentNumber = context.randomSegNum;
                                minTargetSegmentNumber = minimum_segment_num;
                        }
-               } else {
-                       maxTargetSegmentNumber = context.randomSegNum;
-                       minTargetSegmentNumber = minimum_segment_num;
-               }
 
-               if (enforce_virtual_segment_number > 0) {
-                       maxTargetSegmentNumber = enforce_virtual_segment_number;
-                       minTargetSegmentNumber = enforce_virtual_segment_number;
-               }
-               uint64_t before_rm_allocate_resource = gettime_microsec();
+                       if (enforce_virtual_segment_number > 0) {
+                               maxTargetSegmentNumber = enforce_virtual_segment_number;
+                               minTargetSegmentNumber = enforce_virtual_segment_number;
+                       }
+                       uint64_t before_rm_allocate_resource = gettime_microsec();
+
+                       /* cost is use by RM to balance workload between hosts. the cost is at least one block size*/
+                       int64 mincost = min_cost_for_each_query;
+                       mincost <<= 20;
+                       int64 queryCost = context.total_size < mincost ? mincost : context.total_size;
+                       if (QRL_NONE != resourceLife) {
+                               resource = AllocateResource(QRL_ONCE, sliceNum, queryCost,
+                                               maxTargetSegmentNumber, minTargetSegmentNumber,
+                                               context.host_context.hostnameVolInfos, context.host_context.size);
+
+                               saveQueryResourceParameters(
+                                                               resource_parameters,                   /* resource_parameters */
+                                                               QRL_ONCE,                              /* life */
+                                                               sliceNum,                              /* slice_size */
+                                                               queryCost,                             /* iobytes */
+                                                               maxTargetSegmentNumber,                /* max_target_segment_num */
+                                                               minTargetSegmentNumber,                /* min_target_segment_num */
+                                                               context.host_context.hostnameVolInfos, /* vol_info */
+                                                               context.host_context.size              /* vol_info_size */
+                                                               );
+
+                       }
+                       /* for explain statement, we doesn't allocate resource physically*/
+                       else {
+                               uint32 seg_num, seg_num_min, seg_memory_mb;
+                               double seg_core;
+                               GetResourceQuota(maxTargetSegmentNumber, minTargetSegmentNumber, &seg_num,
+                                               &seg_num_min, &seg_memory_mb, &seg_core);
+                               planner_segments = seg_num;
+                       }
+                       uint64_t after_rm_allocate_resource = gettime_microsec();
+                       int eclaspeTime = after_rm_allocate_resource - before_rm_allocate_resource;
+                       if(debug_datalocality_time){
+                               elog(LOG, "rm allocate resource overall execution time: %d us. \n", eclaspeTime);
+                       }
 
-               /* cost is use by RM to balance workload between hosts. the cost is at least one block size*/
-               int64 mincost = min_cost_for_each_query;
-               mincost <<= 20;
-               int64 queryCost = context.total_size < mincost ? mincost : context.total_size;
-               if (QRL_NONE != resourceLife) {
-                       resource = AllocateResource(QRL_ONCE, sliceNum, queryCost,
-                                       maxTargetSegmentNumber, minTargetSegmentNumber,
-                                       context.host_context.hostnameVolInfos, context.host_context.size);
+                       if (resource == NULL) {
+                               result->resource = NULL;
+                               result->resource_parameters = NULL;
+                               result->alloc_results = NIL;
+                               result->relsType = NIL;
+                               result->planner_segments = planner_segments;
+                               return result;
+                       }
 
-                       saveQueryResourceParameters(
-                               resource_parameters,                   /* resource_parameters */
-                               QRL_ONCE,                              /* life */
-                               sliceNum,                              /* slice_size */
-                               queryCost,                             /* iobytes */
-                               maxTargetSegmentNumber,                /* max_target_segment_num */
-                               minTargetSegmentNumber,                /* min_target_segment_num */
-                               context.host_context.hostnameVolInfos, /* vol_info */
-                               context.host_context.size              /* vol_info_size */
-                               );
-
-               }
-               /* for explain statement, we doesn't allocate resource physically*/
-               else {
-                       uint32 seg_num, seg_num_min, seg_memory_mb;
-                       double seg_core;
-                       GetResourceQuota(maxTargetSegmentNumber, minTargetSegmentNumber, &seg_num,
-                                       &seg_num_min, &seg_memory_mb, &seg_core);
-                       planner_segments = seg_num;
-               }
-               uint64_t after_rm_allocate_resource = gettime_microsec();
-               int eclaspeTime = after_rm_allocate_resource - before_rm_allocate_resource;
-               if(debug_datalocality_time){
-                       elog(LOG, "rm allocate resource overall execution time: %d us. \n", eclaspeTime);
+                       if (debug_fake_segmentnum){
+                               fprintf(fpsegnum, "Target segment num Min: %d.\n", minTargetSegmentNumber);
+                               fprintf(fpsegnum, "Target segment num Max: %d.\n", maxTargetSegmentNumber);
+                       }
                }
 
-               if (resource == NULL) {
-                       result->resource = NULL;
-                       result->resource_parameters = NULL;
-                       result->alloc_results = NIL;
-                       result->relsType = NIL;
-                       result->planner_segments = planner_segments;
-                       return result;
-               }
+               MemoryContextSwitchTo(context.datalocality_memorycontext);
+
+               virtual_segments = get_virtual_segments(resource);
+
+               int VirtualSegmentNumber = list_length(virtual_segments);
 
                if (debug_fake_segmentnum){
-                       fprintf(fpsegnum, "Target segment num Min: %d.\n", minTargetSegmentNumber);
-                       fprintf(fpsegnum, "Target segment num Max: %d.\n", maxTargetSegmentNumber);
+                       fprintf(fpsegnum, "Real   segment num    : %d.\n", VirtualSegmentNumber);
+                       fflush(fpsegnum);
+                       fclose(fpsegnum);
+                       fpsegnum = NULL;
+                       elog(ERROR, "Abort fake segment number!");
                }
-       }
 
-       MemoryContextSwitchTo(context.datalocality_memorycontext);
+               /* for normal query if containerCount equals to 0, then stop the query.*/
+               if (resourceLife != QRL_NONE && VirtualSegmentNumber == 0) {
+                       cleanup_allocation_algorithm(&context);
+                       elog(ERROR, "Could not allocate enough resource!");
+               }
 
-       virtual_segments = get_virtual_segments(resource);
+               MemoryContextSwitchTo(context.old_memorycontext);
 
-       int VirtualSegmentNumber = list_length(virtual_segments);
+               /* data locality allocation algorithm*/
+               alloc_result = run_allocation_algorithm(result, virtual_segments, &resource, &context);
 
-       if (debug_fake_segmentnum){
-               fprintf(fpsegnum, "Real   segment num    : %d.\n", VirtualSegmentNumber);
-               fflush(fpsegnum);
-               fclose(fpsegnum);
-               fpsegnum = NULL;
-               elog(ERROR, "Abort fake segment number!");
+               result->resource = resource;
+               result->resource_parameters = resource_parameters;
+               result->alloc_results = alloc_result;
+               result->planner_segments = list_length(resource->segments);
        }
-
-       /* for normal query if containerCount equals to 0, then stop the query.*/
-       if (resourceLife != QRL_NONE && VirtualSegmentNumber == 0) {
+       PG_CATCH();
+       {
                cleanup_allocation_algorithm(&context);
-               elog(ERROR, "Could not allocate enough resource!");
+               PG_RE_THROW();
        }
-
-       MemoryContextSwitchTo(context.old_memorycontext);
-
-       /* data locality allocation algorithm*/
-       alloc_result = run_allocation_algorithm(result, virtual_segments, &resource, &context);
-
-       result->resource = resource;
-       result->resource_parameters = resource_parameters;
-       result->alloc_results = alloc_result;
-       result->planner_segments = list_length(resource->segments);
-
+       PG_END_TRY();
        cleanup_allocation_algorithm(&context);
 
        if(debug_datalocality_time){
                elog(ERROR, "Abort debug metadata, datalocality, rm Time.");
        }
+
        return result;
 }
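
[Editor's note] The larger hunk re-indents the body of calculate_planner_segment_num() inside a PG_TRY()/PG_CATCH() block so that cleanup_allocation_algorithm() runs and the data-locality memory context is released even when elog(ERROR) aborts the calculation partway through, which is the leak observed during long ANALYZE runs. A condensed, hedged sketch of that control flow is below; run_calculation() and AllocContext are hypothetical stand-ins for the real calculation body and context type, while the PG_TRY macros, elog, and cleanup_allocation_algorithm() come from the patch itself.

    /* Sketch only: error-safe cleanup pattern introduced by this commit. */
    #include "postgres.h"

    typedef struct AllocContext AllocContext;

    extern void run_calculation(AllocContext *ctx);              /* may elog(ERROR) */
    extern void cleanup_allocation_algorithm(AllocContext *ctx); /* frees per-query state */

    void
    calculate_with_cleanup(AllocContext *ctx)
    {
            PG_TRY();
            {
                    /* All allocation work happens here; any elog(ERROR) jumps to PG_CATCH. */
                    run_calculation(ctx);
            }
            PG_CATCH();
            {
                    /* Error path: release per-query data-locality memory, then re-throw. */
                    cleanup_allocation_algorithm(ctx);
                    PG_RE_THROW();
            }
            PG_END_TRY();

            /* Normal path: the same cleanup runs exactly once after success. */
            cleanup_allocation_algorithm(ctx);
    }
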
