diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 8577c7b138..0798c8da0b 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1796,10 +1796,9 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
 			   Cost comparison_cost, int sort_mem,
 			   double limit_tuples)
 {
-	double		input_bytes = relation_byte_size(tuples, width);
 	double		output_bytes;
 	double		output_tuples;
-	long		sort_mem_bytes = sort_mem * 1024L;
+	int64		sort_mem_bytes = sort_mem * (int64) 1024;
 
 	/*
 	 * We want to be sure the cost of a sort is never estimated as zero, even
@@ -1812,7 +1811,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
 	comparison_cost += 2.0 * cpu_operator_cost;
 
 	/* Do we have a useful LIMIT? */
-	if (limit_tuples > 0 && limit_tuples < tuples)
+	if (limit_tuples > 0.0 && limit_tuples < tuples)
 	{
 		output_tuples = limit_tuples;
 		output_bytes = relation_byte_size(output_tuples, width);
@@ -1820,7 +1819,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
 	else
 	{
 		output_tuples = tuples;
-		output_bytes = input_bytes;
+		output_bytes = relation_byte_size(tuples, width);
 	}
 
 	if (output_bytes > sort_mem_bytes)
@@ -1828,6 +1827,7 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
 		/*
 		 * We'll have to use a disk-based sort of all the tuples
 		 */
+		double		input_bytes = relation_byte_size(tuples, width);
 		double		npages = ceil(input_bytes / BLCKSZ);
 		double		nruns = input_bytes / sort_mem_bytes;
 		double		mergeorder = tuplesort_merge_order(sort_mem_bytes);
@@ -1853,7 +1853,8 @@ cost_tuplesort(Cost *startup_cost, Cost *run_cost,
 		*startup_cost += npageaccesses *
 			(seq_page_cost * 0.75 + random_page_cost * 0.25);
 	}
-	else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
+	else if (tuples > 2.0 * output_tuples ||
+			 relation_byte_size(tuples, width) > sort_mem_bytes)
 	{
 		/*
 		 * We'll use a bounded heap-sort keeping just K tuples in memory, for
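
A note on the (int64) cast in the first hunk: sort_mem is an int expressed in kilobytes, so without the cast the product sort_mem * 1024 is evaluated in 32-bit int arithmetic and can overflow for settings above INT_MAX / 1024 kB (roughly 2 GB) before it is ever widened to int64. The standalone sketch below, which uses hypothetical values and the standard int64_t as a stand-in for PostgreSQL's int64 typedef, illustrates the difference between multiplying first and widening first:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		/* hypothetical sort_mem setting: 4 GB expressed in kilobytes */
		int			sort_mem = 4 * 1024 * 1024;

		/*
		 * int * int: the product exceeds INT_MAX, so it overflows (formally
		 * undefined behaviour) before being assigned to the wider type.
		 */
		int64_t		overflowed = sort_mem * 1024;

		/* widening one operand first keeps the whole product in 64 bits */
		int64_t		widened = sort_mem * (int64_t) 1024;

		printf("int math: %lld, int64 math: %lld\n",
			   (long long) overflowed, (long long) widened);
		return 0;
	}

On common platforms the first form wraps around, while the second yields the expected 4294967296.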