diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index f6225d2..8cf4d1d 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -668,6 +668,55 @@ PostgreSQL documentation
      </varlistentry>
 
      <varlistentry>
+      <term><option>--custom-fetch-table=<replaceable class="parameter">table</replaceable></option></term>
+      <listitem>
+       <para>
+       Fetch a custom number of rows at a time for the given table(s).
+       <command>pg_dump</command> normally fetches 100 rows at a time when
+       exporting table data as inserts. For tables with very large rows
+       (e.g. ones containing blob fields), such a fetch request can fail
+       because the result set is too large. This parameter selects the
+       table(s) for which a different number of rows is fetched, namely
+       the number of rows given by the <option>--custom-fetch-value</option>
+       parameter.
+       </para>
+       <para>
+       This parameter does not change the output of <command>pg_dump</command>:
+       it just controls how to read the data.
+       </para>
+       <para>
+       This option is only meaningful when the <option>--inserts</option>
+       parameter is used, and it also requires the <option>--custom-fetch-value</option>
+       parameter to be specified.
+       </para>
+      </listitem>
+     </varlistentry>
+     <varlistentry>
+      <term><option>--custom-fetch-value=<replaceable class="parameter">nRows</replaceable></option></term>
+      <listitem>
+       <para>
+       The number of rows to fetch for the table(s) identified by the
+       <option>--custom-fetch-table</option> parameter.
+       <command>pg_dump</command> normally fetches 100 rows at a time when
+       exporting table data as inserts. For tables with very large rows
+       (e.g. ones containing blob fields), such a fetch request can fail
+       because the result set is too large. With this parameter the given
+       number of rows (usually fewer than 100) is fetched for the table(s)
+       selected by the <option>--custom-fetch-table</option> parameter,
+       which can avoid the failure.
+       </para>
+       <para>
+       This parameter does not change the output of <command>pg_dump</command>:
+       it just controls how to read the data.
+       </para>
+       <para>
+       This option is only meaningful when the <option>--inserts</option>
+       parameter is used, and it also requires the <option>--custom-fetch-table</option>
+       parameter to be specified.
+       </para>
+      </listitem>
+     </varlistentry>
+     <varlistentry>
       <term><option>--disable-dollar-quoting</></term>
       <listitem>
        <para>
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 7241cdf..d585252 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -152,6 +152,7 @@ typedef struct _dumpOptions
 	int			outputNoTablespaces;
 	int			use_setsessauth;
 	int			enable_row_security;
+	int			custom_fetch_value;
 
 	/* default, if no "inclusion" switches appear, is to dump everything */
 	bool		include_everything;
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 7949aad..dc93d02 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -116,6 +116,8 @@ static SimpleStringList table_exclude_patterns = {NULL, NULL};
 static SimpleOidList table_exclude_oids = {NULL, NULL};
 static SimpleStringList tabledata_exclude_patterns = {NULL, NULL};
 static SimpleOidList tabledata_exclude_oids = {NULL, NULL};
+static SimpleStringList custom_fetch_table_patterns = {NULL, NULL};
+static SimpleOidList custom_fetch_table_oids = {NULL, NULL};
 
 
 char		g_opaque_type[10];	/* name for the opaque type */
@@ -344,6 +346,8 @@ main(int argc, char **argv)
 		{"no-security-labels", no_argument, &dopt.no_security_labels, 1},
 		{"no-synchronized-snapshots", no_argument, &dopt.no_synchronized_snapshots, 1},
 		{"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
+		{"custom-fetch-table", required_argument, NULL, 7},
+		{"custom-fetch-value", required_argument, NULL, 8},
 
 		{NULL, 0, NULL, 0}
 	};
@@ -524,6 +528,17 @@ main(int argc, char **argv)
 				dumpsnapshot = pg_strdup(optarg);
 				break;
 
+			case 7:				/* custom fetch table(s) */
+				simple_string_list_append(&custom_fetch_table_patterns, optarg);
+				break;
+			case 8:				/* custom fetch value */
+				dopt.custom_fetch_value = atoi(optarg);
+				if (dopt.custom_fetch_value <= 0 )
+				{
+					write_msg(NULL, "custom fetch value must be bigger than 0\n");
+					exit_nicely(1);
+				}
+				break;
 			default:
 				fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
 				exit_nicely(1);
@@ -581,6 +596,11 @@ main(int argc, char **argv)
 	if (dopt.if_exists && !dopt.outputClean)
 		exit_horribly(NULL, "option --if-exists requires option -c/--clean\n");
 
+	if ((dopt.custom_fetch_value>0)&&(!dopt.dump_inserts))
+	{
+		write_msg(NULL, "option --custom-fetch-value requires option --inserts/--column-inserts\n");
+		exit_nicely(1);
+	}
 	/* Identify archive format to emit */
 	archiveFormat = parseArchiveFormat(format, &archiveMode);
 
@@ -717,6 +737,15 @@ main(int argc, char **argv)
 
 	/* non-matching exclusion patterns aren't an error */
 
+	/*	For the "custom fetch" options, non-matching patterns are not
+		an error: some (or all) of the given table names may not exist
+		in the database (e.g. a batch script may pass a standard list of
+		table names that are not necessarily all present). */
+	expand_table_name_patterns(fout, &custom_fetch_table_patterns,
+							   &custom_fetch_table_oids,
+							   false);
+	if ((custom_fetch_table_oids.head!=NULL)&&(dopt.custom_fetch_value==0))
+		exit_horribly(NULL, "option --custom-fetch-table requires --custom-fetch-value\n");  
 	/*
 	 * Dumping blobs is the default for dumps where an inclusion switch is not
 	 * used (an "include everything" dump).  -B can be used to exclude blobs
@@ -902,6 +931,9 @@ help(const char *progname)
 	printf(_("  -x, --no-privileges          do not dump privileges (grant/revoke)\n"));
 	printf(_("  --binary-upgrade             for use by upgrade utilities only\n"));
 	printf(_("  --column-inserts             dump data as INSERT commands with column names\n"));
+	printf(_("  --custom-fetch-table=TABLE   defines the name(s) of the table(s) for which to\n"
+			 "                               fetch a custom number of rows\n"));
+	printf(_("  --custom-fetch-value=VALUE   defines how many rows to fetch\n"));
 	printf(_("  --disable-dollar-quoting     disable dollar quoting, use SQL standard quoting\n"));
 	printf(_("  --disable-triggers           disable triggers during data-only restore\n"));
 	printf(_("  --enable-row-security        enable row security (dump only content user has\n"
@@ -1800,10 +1832,12 @@ dumpTableData_insert(Archive *fout, void *dcontext)
 	DumpOptions *dopt = fout->dopt;
 	PQExpBuffer q = createPQExpBuffer();
 	PQExpBuffer insertStmt = NULL;
+	PQExpBuffer fetchStmt = createPQExpBuffer();
 	PGresult   *res;
 	int			tuple;
 	int			nfields;
 	int			field;
+  int     nRecordsToFetch;
 
 	/*
 	 * Make sure we are in proper schema.  We will qualify the table name
@@ -1823,10 +1857,19 @@ dumpTableData_insert(Archive *fout, void *dcontext)
 
 	ExecuteSqlStatement(fout, q->data);
 
+	/* Check if for this table we have to use custom or
+		standard fetch */
+	if (simple_oid_list_member(&custom_fetch_table_oids,
+							   tbinfo->dobj.catId.oid))
+		nRecordsToFetch = dopt->custom_fetch_value;
+	else
+		nRecordsToFetch = 100;
+	appendPQExpBuffer(fetchStmt, "FETCH %d FROM _pg_dump_cursor",
+								 nRecordsToFetch );
+  
 	while (1)
 	{
-		res = ExecuteSqlQuery(fout, "FETCH 100 FROM _pg_dump_cursor",
-							  PGRES_TUPLES_OK);
+		res = ExecuteSqlQuery(fout, fetchStmt->data, PGRES_TUPLES_OK);
 		nfields = PQnfields(res);
 		for (tuple = 0; tuple < PQntuples(res); tuple++)
 		{
@@ -1956,6 +1999,7 @@ dumpTableData_insert(Archive *fout, void *dcontext)
 	destroyPQExpBuffer(q);
 	if (insertStmt != NULL)
 		destroyPQExpBuffer(insertStmt);
+	destroyPQExpBuffer(fetchStmt);
 
 	return 1;
 }
