Greg Stark wrote:
> On Sat, Jun 12, 2010 at 4:58 AM, Bruce Momjian <br...@momjian.us> wrote:
> > However, I might have been too conservative.  How do tools that generate
> > multiple output files usually handle this situation?  Do they output into
> > a subdirectory in $HOME, or in a subdirectory of the current
> > directory, or just create multiple files without a subdirectory?
>
> Generally they put them in the current directory without
> subdirectories but take a parameter to specify a directory to use.
> That parameter could be mandatory though if you're afraid the current
> directory isn't a suitable place.
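As an illustration of the convention Greg describes (default to the current
directory, but accept an optional target directory), a rough sketch in C could
look like the following.  The -o/--output-dir switch and the whole program are
hypothetical and are not part of pg_upgrade:

    /*
     * Hypothetical sketch only: write output files to the current working
     * directory by default, but let the user override it with -o/--output-dir.
     */
    #include <getopt.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define PATHLEN 1024

    int
    main(int argc, char *argv[])
    {
        char        output_dir[PATHLEN];
        int         c;
        static struct option long_options[] = {
            {"output-dir", required_argument, NULL, 'o'},
            {NULL, 0, NULL, 0}
        };

        /* default: the directory the tool was started from */
        if (getcwd(output_dir, sizeof(output_dir)) == NULL)
        {
            fprintf(stderr, "could not determine current directory\n");
            exit(1);
        }

        while ((c = getopt_long(argc, argv, "o:", long_options, NULL)) != -1)
        {
            if (c == 'o')
            {
                /* user-supplied directory overrides the default */
                strncpy(output_dir, optarg, sizeof(output_dir) - 1);
                output_dir[sizeof(output_dir) - 1] = '\0';
            }
            else
                exit(1);
        }

        printf("output files will be written to: %s\n", output_dir);
        return 0;
    }
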
Agreed.  I have applied the attached patch which creates the files in
the current directory.  I think that will be fine and don't see any
need for a directory parameter.  I have kept the printing of the full
path name in the output:

    Upgrade complete
    ----------------
    | Optimizer statistics is not transferred by pg_upgrade
    | so consider running:
    |     vacuumdb --all --analyze-only
    | on the newly-upgraded cluster.
    | Running this script will delete the old cluster's data files:
    |     /u/pg_migrator/pg_migrator/delete_old_cluster.sh

I figured this would be helpful for people on Windows who might not
know the actual directory used for the files.  However, it does make
the display kind of wide.  Ideas?

-- 
  Bruce Momjian  <br...@momjian.us>        http://momjian.us
  EnterpriseDB                             http://enterprisedb.com

  + None of us is going to be here forever. +
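For anyone wondering why that last banner line gets so wide: the script name
is now prefixed with whatever the current directory happens to be.  A
simplified, illustrative sketch of the path construction follows; the
constants and messages here are stand-ins, not pg_upgrade's actual code (see
check.c in the attached patch for the real version):

    /*
     * Illustrative sketch of how the deletion-script path shown in the
     * banner is built once the files live in the current directory.
     */
    #include <stdio.h>
    #include <unistd.h>

    #define MAXPGPATH 1024
    #define EXEC_EXT  "sh"      /* "bat" on Windows */

    int
    main(void)
    {
        char        cwd[MAXPGPATH];
        char        script[MAXPGPATH];

        if (getcwd(cwd, sizeof(cwd)) == NULL)
            return 1;

        /* <current directory>/delete_old_cluster.sh */
        snprintf(script, sizeof(script), "%s/delete_old_cluster.%s",
                 cwd, EXEC_EXT);

        printf("| Running this script will delete the old cluster's data files:\n");
        printf("|     %s\n", script);
        return 0;
    }
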
Index: contrib/pg_upgrade/check.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/check.c,v
retrieving revision 1.5
diff -c -c -r1.5 check.c
*** contrib/pg_upgrade/check.c	23 May 2010 16:54:13 -0000	1.5
--- contrib/pg_upgrade/check.c	12 Jun 2010 17:01:23 -0000
***************
*** 381,387 ****
  	prep_status(ctx, "Creating script to delete old cluster");

  	snprintf(*deletion_script_file_name, MAXPGPATH, "%s/delete_old_cluster.%s",
! 			 ctx->output_dir, EXEC_EXT);

  	if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
  		pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
--- 381,387 ----
  	prep_status(ctx, "Creating script to delete old cluster");

  	snprintf(*deletion_script_file_name, MAXPGPATH, "%s/delete_old_cluster.%s",
! 			 ctx->cwd, EXEC_EXT);

  	if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
  		pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
Index: contrib/pg_upgrade/dump.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/dump.c,v
retrieving revision 1.1
diff -c -c -r1.1 dump.c
*** contrib/pg_upgrade/dump.c	12 May 2010 02:19:10 -0000	1.1
--- contrib/pg_upgrade/dump.c	12 Jun 2010 17:01:23 -0000
***************
*** 21,27 ****
  	exec_prog(ctx, true,
  			  SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --schema-only "
  			  "--binary-upgrade > \"%s/" ALL_DUMP_FILE "\"" SYSTEMQUOTE,
! 			  ctx->new.bindir, ctx->old.port, ctx->output_dir);

  	check_ok(ctx);
  }
--- 21,27 ----
  	exec_prog(ctx, true,
  			  SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --schema-only "
  			  "--binary-upgrade > \"%s/" ALL_DUMP_FILE "\"" SYSTEMQUOTE,
! 			  ctx->new.bindir, ctx->old.port, ctx->cwd);

  	check_ok(ctx);
  }
***************
*** 52,64 ****
  	char		filename[MAXPGPATH];
  	bool		suppressed_username = false;

! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
  	if ((all_dump = fopen(filename, "r")) == NULL)
  		pg_log(ctx, PG_FATAL, "Cannot open dump file %s\n", filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
  	if ((globals_dump = fopen(filename, "w")) == NULL)
  		pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
  	if ((db_dump = fopen(filename, "w")) == NULL)
  		pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
  	current_output = globals_dump;
--- 52,64 ----
  	char		filename[MAXPGPATH];
  	bool		suppressed_username = false;

! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, ALL_DUMP_FILE);
  	if ((all_dump = fopen(filename, "r")) == NULL)
  		pg_log(ctx, PG_FATAL, "Cannot open dump file %s\n", filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBALS_DUMP_FILE);
  	if ((globals_dump = fopen(filename, "w")) == NULL)
  		pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, DB_DUMP_FILE);
  	if ((db_dump = fopen(filename, "w")) == NULL)
  		pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
  	current_output = globals_dump;
Index: contrib/pg_upgrade/function.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/function.c,v
retrieving revision 1.2
diff -c -c -r1.2 function.c
*** contrib/pg_upgrade/function.c	13 May 2010 01:03:01 -0000	1.2
--- contrib/pg_upgrade/function.c	12 Jun 2010 17:01:23 -0000
***************
*** 213,219 ****
  	prep_status(ctx, "Checking for presence of required libraries");

  	snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
! 			 ctx->output_dir);

  	for (libnum = 0; libnum < ctx->num_libraries; libnum++)
  	{
--- 213,219 ----
  	prep_status(ctx, "Checking for presence of required libraries");

  	snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
! 			 ctx->cwd);

  	for (libnum = 0; libnum < ctx->num_libraries; libnum++)
  	{
Index: contrib/pg_upgrade/option.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/option.c,v
retrieving revision 1.5
diff -c -c -r1.5 option.c
*** contrib/pg_upgrade/option.c	25 May 2010 15:55:28 -0000	1.5
--- contrib/pg_upgrade/option.c	12 Jun 2010 17:01:23 -0000
***************
*** 84,103 ****
  	if (user_id == 0)
  		pg_log(ctx, PG_FATAL, "%s: cannot be run as root\n", ctx->progname);

! #ifndef WIN32
! 	get_home_path(ctx->home_dir);
! #else
! 	{
! 		char	   *tmppath;
!
! 		/* TMP is the best place on Windows, rather than APPDATA */
! 		if ((tmppath = getenv("TMP")) == NULL)
! 			pg_log(ctx, PG_FATAL, "TMP environment variable is not set.\n");
! 		snprintf(ctx->home_dir, MAXPGPATH, "%s", tmppath);
! 	}
! #endif
!
! 	snprintf(ctx->output_dir, MAXPGPATH, "%s/" OUTPUT_SUBDIR, ctx->home_dir);

  	while ((option = getopt_long(argc, argv, "d:D:b:B:cgG:kl:p:P:u:v",
  								 long_options, &optindex)) != -1)
--- 84,90 ----
  	if (user_id == 0)
  		pg_log(ctx, PG_FATAL, "%s: cannot be run as root\n", ctx->progname);

! 	getcwd(ctx->cwd, MAXPGPATH);

  	while ((option = getopt_long(argc, argv, "d:D:b:B:cgG:kl:p:P:u:v",
  								 long_options, &optindex)) != -1)
Index: contrib/pg_upgrade/pg_upgrade.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/pg_upgrade.c,v
retrieving revision 1.4
diff -c -c -r1.4 pg_upgrade.c
*** contrib/pg_upgrade/pg_upgrade.c	19 May 2010 18:27:43 -0000	1.4
--- contrib/pg_upgrade/pg_upgrade.c	12 Jun 2010 17:01:23 -0000
***************
*** 18,24 ****
  static void set_frozenxids(migratorContext *ctx);
  static void setup(migratorContext *ctx, char *argv0, bool live_check);
  static void cleanup(migratorContext *ctx);
- static void create_empty_output_directory(migratorContext *ctx);


  int
--- 18,23 ----
***************
*** 37,44 ****

  	setup(&ctx, argv[0], live_check);

- 	create_empty_output_directory(&ctx);
-
  	check_cluster_versions(&ctx);
  	check_cluster_compatibility(&ctx, live_check);

--- 36,41 ----
***************
*** 201,207 ****
  	exec_prog(ctx, true,
  			  SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
  			  "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
! 			  ctx->new.bindir, ctx->new.port, ctx->output_dir,
  			  GLOBALS_DUMP_FILE, ctx->logfile);

  	check_ok(ctx);
--- 198,204 ----
  	exec_prog(ctx, true,
  			  SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
  			  "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
! 			  ctx->new.bindir, ctx->new.port, ctx->cwd,
  			  GLOBALS_DUMP_FILE, ctx->logfile);

  	check_ok(ctx);
***************
*** 223,229 ****
  	exec_prog(ctx, true,
  			  SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
  			  "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
! 			  ctx->new.bindir, ctx->new.port, ctx->output_dir,
  			  DB_DUMP_FILE, ctx->logfile);

  	check_ok(ctx);
--- 220,226 ----
  	exec_prog(ctx, true,
  			  SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
  			  "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
! 			  ctx->new.bindir, ctx->new.port, ctx->cwd,
  			  DB_DUMP_FILE, ctx->logfile);

  	check_ok(ctx);
***************
*** 399,431 ****
  	if (ctx->debug_fd)
  		fclose(ctx->debug_fd);

! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
  	unlink(filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
  	unlink(filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
  	unlink(filename);
  }
-
-
- /*
-  *	create_empty_output_directory
-  *
-  *	Create empty directory for output files
-  */
- static void
- create_empty_output_directory(migratorContext *ctx)
- {
- 	/*
- 	 *	rmtree() outputs a warning if the directory does not exist,
- 	 *	so we try to create the directory first.
- 	 */
- 	if (mkdir(ctx->output_dir, S_IRWXU) != 0)
- 	{
- 		if (errno == EEXIST)
- 			rmtree(ctx->output_dir, false);
- 		else
- 			pg_log(ctx, PG_FATAL, "Cannot create subdirectory %s: %s\n",
- 				   ctx->output_dir, getErrorText(errno));
- 	}
- }
--- 396,405 ----
  	if (ctx->debug_fd)
  		fclose(ctx->debug_fd);

! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, ALL_DUMP_FILE);
  	unlink(filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBALS_DUMP_FILE);
  	unlink(filename);
! 	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, DB_DUMP_FILE);
  	unlink(filename);
  }
Index: contrib/pg_upgrade/pg_upgrade.h
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/pg_upgrade.h,v
retrieving revision 1.5
diff -c -c -r1.5 pg_upgrade.h
*** contrib/pg_upgrade/pg_upgrade.h	14 May 2010 00:13:38 -0000	1.5
--- contrib/pg_upgrade/pg_upgrade.h	12 Jun 2010 17:01:23 -0000
***************
*** 29,36 ****
  #define OVERWRITE_MESSAGE  " %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
  #define GET_MAJOR_VERSION(v)	((v) / 100)

- #define OUTPUT_SUBDIR		"pg_upgrade_output"
-
  #define ALL_DUMP_FILE		"pg_upgrade_dump_all.sql"
  /* contains both global db information and CREATE DATABASE commands */
  #define GLOBALS_DUMP_FILE	"pg_upgrade_dump_globals.sql"
--- 29,34 ----
***************
*** 217,224 ****
  	const char *progname;		/* complete pathname for this program */
  	char	   *exec_path;		/* full path to my executable */
  	char	   *user;			/* username for clusters */
! 	char		home_dir[MAXPGPATH];	/* name of user's home directory */
! 	char		output_dir[MAXPGPATH];	/* directory for pg_upgrade output */
  	char	  **tablespaces;	/* tablespaces */
  	int			num_tablespaces;
  	char	  **libraries;		/* loadable libraries */
--- 215,221 ----
  	const char *progname;		/* complete pathname for this program */
  	char	   *exec_path;		/* full path to my executable */
  	char	   *user;			/* username for clusters */
! 	char		cwd[MAXPGPATH];	/* directory for pg_upgrade output */
  	char	  **tablespaces;	/* tablespaces */
  	int			num_tablespaces;
  	char	  **libraries;		/* loadable libraries */
Index: contrib/pg_upgrade/version.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/version.c,v
retrieving revision 1.1
diff -c -c -r1.1 version.c
*** contrib/pg_upgrade/version.c	12 May 2010 02:19:11 -0000	1.1
--- contrib/pg_upgrade/version.c	12 Jun 2010 17:01:23 -0000
***************
*** 28,34 ****
  	prep_status(ctx, "Checking for large objects");

  	snprintf(output_path, sizeof(output_path), "%s/pg_largeobject.sql",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 28,34 ----
  	prep_status(ctx, "Checking for large objects");

  	snprintf(output_path, sizeof(output_path), "%s/pg_largeobject.sql",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
Index: contrib/pg_upgrade/version_old_8_3.c
===================================================================
RCS file: /cvsroot/pgsql/contrib/pg_upgrade/version_old_8_3.c,v
retrieving revision 1.2
diff -c -c -r1.2 version_old_8_3.c
*** contrib/pg_upgrade/version_old_8_3.c	14 May 2010 00:13:38 -0000	1.2
--- contrib/pg_upgrade/version_old_8_3.c	12 Jun 2010 17:01:23 -0000
***************
*** 28,34 ****
  	prep_status(ctx, "Checking for invalid 'name' user columns");

  	snprintf(output_path, sizeof(output_path), "%s/tables_using_name.txt",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 28,34 ----
  	prep_status(ctx, "Checking for invalid 'name' user columns");

  	snprintf(output_path, sizeof(output_path), "%s/tables_using_name.txt",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
***************
*** 123,129 ****
  	prep_status(ctx, "Checking for tsquery user columns");

  	snprintf(output_path, sizeof(output_path), "%s/tables_using_tsquery.txt",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 123,129 ----
  	prep_status(ctx, "Checking for tsquery user columns");

  	snprintf(output_path, sizeof(output_path), "%s/tables_using_tsquery.txt",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
***************
*** 222,228 ****
  	}

  	snprintf(output_path, sizeof(output_path), "%s/contrib_isn_and_int8_pass_by_value.txt",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 222,228 ----
  	}

  	snprintf(output_path, sizeof(output_path), "%s/contrib_isn_and_int8_pass_by_value.txt",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
***************
*** 312,318 ****
  	prep_status(ctx, "Checking for tsvector user columns");

  	snprintf(output_path, sizeof(output_path), "%s/rebuild_tsvector_tables.sql",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 312,318 ----
  	prep_status(ctx, "Checking for tsvector user columns");

  	snprintf(output_path, sizeof(output_path), "%s/rebuild_tsvector_tables.sql",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
***************
*** 457,463 ****
  	prep_status(ctx, "Checking for hash and gin indexes");

  	snprintf(output_path, sizeof(output_path), "%s/reindex_hash_and_gin.sql",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 457,463 ----
  	prep_status(ctx, "Checking for hash and gin indexes");

  	snprintf(output_path, sizeof(output_path), "%s/reindex_hash_and_gin.sql",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
***************
*** 573,579 ****
  	prep_status(ctx, "Checking for bpchar_pattern_ops indexes");

  	snprintf(output_path, sizeof(output_path), "%s/reindex_bpchar_ops.sql",
! 			 ctx->output_dir);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
--- 573,579 ----
  	prep_status(ctx, "Checking for bpchar_pattern_ops indexes");

  	snprintf(output_path, sizeof(output_path), "%s/reindex_bpchar_ops.sql",
! 			 ctx->cwd);

  	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
  	{
***************
*** 705,711 ****
  	bool		found = false;
  	char	   *output_path = pg_malloc(ctx, MAXPGPATH);

! 	snprintf(output_path, MAXPGPATH, "%s/adjust_sequences.sql", ctx->output_dir);

  	prep_status(ctx, "Creating script to adjust sequences");

--- 705,711 ----
  	bool		found = false;
  	char	   *output_path = pg_malloc(ctx, MAXPGPATH);

! 	snprintf(output_path, MAXPGPATH, "%s/adjust_sequences.sql", ctx->cwd);

  	prep_status(ctx, "Creating script to adjust sequences");

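For reference, here is a standalone sketch of the getcwd() lookup the patch now
relies on in option.c, written with the explicit error check a caller would
normally want.  The error handling and messages are illustrative only and are
not taken from pg_upgrade:

    /*
     * Illustrative sketch of the getcwd() call used to pick the output
     * location, with explicit error handling.  MAXPGPATH is assumed to
     * match PostgreSQL's definition.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define MAXPGPATH 1024

    int
    main(void)
    {
        char        cwd[MAXPGPATH];

        if (getcwd(cwd, sizeof(cwd)) == NULL)
        {
            fprintf(stderr, "could not identify current directory: %s\n",
                    strerror(errno));
            exit(1);
        }

        printf("pg_upgrade output files would be created in: %s\n", cwd);
        return 0;
    }
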
-- Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org) To make changes to your subscription: http://www.postgresql.org/mailpref/pgsql-hackers