From 431909d92594e4d3bd6c35a3da546d23f103016f Mon Sep 17 00:00:00 2001
From: Israel Barth Rubio <barthisrael@gmail.com>
Date: Wed, 19 Feb 2025 03:26:30 +0000
Subject: [PATCH v4] pg_combinebackup: add support for hard links

Up to now, pg_combinebackup reconstructs incremental files, if needed,
otherwise copies them from any of the input backups to the output
directory. That copy mechanism can use different methods, depending on
the argument specified by the user.

This commit adds support for a new "copy method": hard links
(-k/--link). When using that mode, instead of copying unmodified files
from the input backups to the output directory, pg_combinebackup
creates the files as hard links from the output directory to the input
backups.

The new link method might speed up the reconstruction of the synthetic
backup (no file copy) and reduce the disk space taken by the synthetic
backup. The benefits depend on the modification pattern of files in
PGDATA between backups, imposed by the workload on Postgres.

This feature requires that the input backups plus the output directory
are in the same file system. Also, caution is required from the user
when modifying or starting the cluster from a synthetic backup, as that
might invalidate one or more of the input backups.

Signed-off-by: Israel Barth Rubio <barthisrael@gmail.com>
---
 doc/src/sgml/ref/pg_combinebackup.sgml      |  32 ++-
 src/bin/pg_combinebackup/copy_file.c        |  34 ++-
 src/bin/pg_combinebackup/copy_file.h        |   1 +
 src/bin/pg_combinebackup/meson.build        |   1 +
 src/bin/pg_combinebackup/pg_combinebackup.c |  12 +-
 src/bin/pg_combinebackup/t/010_links.pl     | 230 ++++++++++++++++++++
 6 files changed, 307 insertions(+), 3 deletions(-)
 create mode 100644 src/bin/pg_combinebackup/t/010_links.pl

diff --git a/doc/src/sgml/ref/pg_combinebackup.sgml b/doc/src/sgml/ref/pg_combinebackup.sgml
index 091982f62a..79c73ad460 100644
--- a/doc/src/sgml/ref/pg_combinebackup.sgml
+++ b/doc/src/sgml/ref/pg_combinebackup.sgml
@@ -137,6 +137,35 @@ PostgreSQL documentation
       </listitem>
      </varlistentry>
 
+     <varlistentry>
+      <term><option>-k</option></term>
+      <term><option>--link</option></term>
+      <listitem>
+       <para>
+        Use hard links instead of copying files to the synthetic backup.
+        Reconstruction of the synthetic backup might be faster (no file copying)
+        and use less disk space, but care must be taken when using the output
+        directory, because any modifications to that directory (for example,
+        starting the server) can also affect the input directories. Likewise,
+        changes to the input directories (for example, starting the server on
+        the full backup) could affect the output directory. Thus, this option
+        is best used when the input directories are only copies that will be
+        removed after <application>pg_combinebackup</application> has completed.
+       </para>
+
+       <para>
+        Requires that the input backups and the output directory are in the
+        same file system.
+       </para>
+
+       <para>
+        If a backup manifest is not available or does not contain checksums of
+        the right type, hard links will still be created, but the files will
+        also be read block-by-block for the checksum calculation.
+       </para>
+      </listitem>
+     </varlistentry>
+
      <varlistentry>
       <term><option>--clone</option></term>
       <listitem>
@@ -167,7 +196,8 @@ PostgreSQL documentation
       <listitem>
        <para>
         Perform regular file copy.  This is the default.  (See also
-        <option>--copy-file-range</option> and <option>--clone</option>.)
+        <option>--copy-file-range</option>, <option>--clone</option> and
+        <option>-k</option>/<option>--link</option>.)
        </para>
       </listitem>
      </varlistentry>
diff --git a/src/bin/pg_combinebackup/copy_file.c b/src/bin/pg_combinebackup/copy_file.c
index 4e27814839..f4f38e9ad7 100644
--- a/src/bin/pg_combinebackup/copy_file.c
+++ b/src/bin/pg_combinebackup/copy_file.c
@@ -40,6 +40,9 @@ static void copy_file_copyfile(const char *src, const char *dst,
 							   pg_checksum_context *checksum_ctx);
 #endif
 
+static void copy_file_link(const char *src, const char *dest,
+						   pg_checksum_context *checksum_ctx);
+
 /*
  * Copy a regular file, optionally computing a checksum, and emitting
  * appropriate debug messages. But if we're in dry-run mode, then just emit
@@ -69,7 +72,14 @@ copy_file(const char *src, const char *dst,
 	}
 
 #ifdef WIN32
-	copy_method = COPY_METHOD_COPYFILE;
+	/*
+	 * Windows only supports two "copy methods": CopyFile and
+	 * CreateHardLink. Whenever the user selects a method other than
+	 * --link, we force pg_combinebackup to use CopyFile, as --clone and
+	 * --copy-file-range are not supported on that platform.
+	 */
+	if (copy_method != COPY_METHOD_LINK)
+		copy_method = COPY_METHOD_COPYFILE;
 #endif
 
 	/* Determine the name of the copy strategy for use in log messages. */
@@ -93,6 +103,10 @@ copy_file(const char *src, const char *dst,
 			strategy_implementation = copy_file_copyfile;
 			break;
 #endif
+		case COPY_METHOD_LINK:
+			strategy_name = "link";
+			strategy_implementation = copy_file_link;
+			break;
 	}
 
 	if (dry_run)
@@ -304,3 +318,21 @@ copy_file_copyfile(const char *src, const char *dst,
 	checksum_file(src, checksum_ctx);
 }
 #endif							/* WIN32 */
+
+/*
+ * copy_file_link
+ * 		Hard-links a file from src to dest.
+ *
+ * If needed, also reads the file and calculates the checksum.
+ */
+static void
+copy_file_link(const char *src, const char *dest,
+			   pg_checksum_context *checksum_ctx)
+{
+	if (link(src, dest) < 0)
+		pg_fatal("error while linking file from \"%s\" to \"%s\": %m",
+				 src, dest);
+
+	/* if needed, calculate checksum of the file */
+	checksum_file(src, checksum_ctx);
+}
diff --git a/src/bin/pg_combinebackup/copy_file.h b/src/bin/pg_combinebackup/copy_file.h
index 92f104115b..5a8517629c 100644
--- a/src/bin/pg_combinebackup/copy_file.h
+++ b/src/bin/pg_combinebackup/copy_file.h
@@ -25,6 +25,7 @@ typedef enum CopyMethod
 #ifdef WIN32
 	COPY_METHOD_COPYFILE,
 #endif
+	COPY_METHOD_LINK,
 } CopyMethod;
 
 extern void copy_file(const char *src, const char *dst,
diff --git a/src/bin/pg_combinebackup/meson.build b/src/bin/pg_combinebackup/meson.build
index 0c4fd9e627..e19c309ad2 100644
--- a/src/bin/pg_combinebackup/meson.build
+++ b/src/bin/pg_combinebackup/meson.build
@@ -37,6 +37,7 @@ tests += {
       't/007_wal_level_minimal.pl',
       't/008_promote.pl',
       't/009_no_full_file.pl',
+      't/010_links.pl',
     ],
   }
 }
diff --git a/src/bin/pg_combinebackup/pg_combinebackup.c b/src/bin/pg_combinebackup/pg_combinebackup.c
index 5864ec574f..e005b033ed 100644
--- a/src/bin/pg_combinebackup/pg_combinebackup.c
+++ b/src/bin/pg_combinebackup/pg_combinebackup.c
@@ -135,6 +135,7 @@ main(int argc, char *argv[])
 		{"no-sync", no_argument, NULL, 'N'},
 		{"output", required_argument, NULL, 'o'},
 		{"tablespace-mapping", required_argument, NULL, 'T'},
+		{"link", no_argument, NULL, 'k'},
 		{"manifest-checksums", required_argument, NULL, 1},
 		{"no-manifest", no_argument, NULL, 2},
 		{"sync-method", required_argument, NULL, 3},
@@ -172,7 +173,7 @@ main(int argc, char *argv[])
 	opt.copy_method = COPY_METHOD_COPY;
 
 	/* process command-line options */
-	while ((c = getopt_long(argc, argv, "dnNo:T:",
+	while ((c = getopt_long(argc, argv, "dnNo:T:k",
 							long_options, &optindex)) != -1)
 	{
 		switch (c)
@@ -193,6 +194,9 @@ main(int argc, char *argv[])
 			case 'T':
 				add_tablespace_mapping(&opt, optarg);
 				break;
+			case 'k':
+				opt.copy_method = COPY_METHOD_LINK;
+				break;
 			case 1:
 				if (!pg_checksum_parse_type(optarg,
 											&opt.manifest_checksums))
@@ -424,6 +428,11 @@ main(int argc, char *argv[])
 		}
 	}
 
+	/* Warn about the possibility of compromising the backups, when link mode */
+	if (opt.copy_method == COPY_METHOD_LINK)
+		pg_log_warning("--link mode was used; any modifications to the output "
+					   "directory may destructively modify input directories");
+
 	/* It's a success, so don't remove the output directories. */
 	reset_directory_cleanup_list();
 	exit(0);
@@ -766,6 +775,7 @@ help(const char *progname)
 	printf(_("  -o, --output=DIRECTORY    output directory\n"));
 	printf(_("  -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
 			 "                            relocate tablespace in OLDDIR to NEWDIR\n"));
+	printf(_("  -k, --link                link files instead of copying\n"));
 	printf(_("      --clone               clone (reflink) files instead of copying\n"));
 	printf(_("      --copy                copy files (default)\n"));
 	printf(_("      --copy-file-range     copy using copy_file_range() system call\n"));
diff --git a/src/bin/pg_combinebackup/t/010_links.pl b/src/bin/pg_combinebackup/t/010_links.pl
new file mode 100644
index 0000000000..3fe4271a8f
--- /dev/null
+++ b/src/bin/pg_combinebackup/t/010_links.pl
@@ -0,0 +1,230 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test aims to validate that hard links are created as expected in the
+# output directory, when running pg_combinebackup with --link mode.
+
+use strict;
+use warnings FATAL => 'all';
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+use Test::More;
+use File::Find;
+
+# Set up a new database instance.
+my $primary = PostgreSQL::Test::Cluster->new('primary');
+$primary->init(has_archiving => 1, allows_streaming => 1);
+$primary->append_conf('postgresql.conf', 'summarize_wal = on');
+$primary->start;
+
+# Create some tables (~35MB each).
+$primary->safe_psql('postgres', <<EOM);
+CREATE TABLE test_1 AS SELECT generate_series(1, 1000000);
+CREATE TABLE test_2 AS SELECT generate_series(1, 1000000);
+CREATE TABLE test_3 AS SELECT generate_series(1, 1000000);
+EOM
+
+# Fetch information about the data files.
+my $query = "SELECT pg_relation_filepath(oid) FROM pg_class WHERE relname = '%s';";
+
+my $pg_attribute_path = $primary->safe_psql('postgres', sprintf($query, 'pg_attribute'));
+note "pg_attribute path is $pg_attribute_path";
+
+my $pg_class_path = $primary->safe_psql('postgres', sprintf($query, 'pg_class'));
+note "pg_class path is $pg_class_path";
+
+my $pg_statistic_path = $primary->safe_psql('postgres', sprintf($query, 'pg_statistic'));
+note "pg_statistic path is $pg_statistic_path";
+
+my $test_1_path = $primary->safe_psql('postgres', sprintf($query, 'test_1'));
+note "test_1 path is $test_1_path";
+
+my $test_2_path = $primary->safe_psql('postgres', sprintf($query, 'test_2'));
+note "test_2 path is $test_2_path";
+
+my $test_3_path = $primary->safe_psql('postgres', sprintf($query, 'test_3'));
+note "test_3 path is $test_3_path";
+
+# Take a full backup.
+my $backup1path = $primary->backup_dir . '/backup1';
+$primary->command_ok(
+	[
+		'pg_basebackup',
+		'--pgdata' => $backup1path,
+		'--no-sync',
+		'--checkpoint' => 'fast',
+        '--wal-method' => 'none'
+	],
+	"full backup");
+
+# Perform an update that touches the whole test_2 data file(s).
+$primary->safe_psql('postgres', <<EOM);
+UPDATE test_2
+SET generate_series = generate_series + 1;
+EOM
+
+# Perform an insert that touches a single page of the last test_3 data file.
+$primary->safe_psql('postgres', <<EOM);
+INSERT INTO test_3 (generate_series) VALUES (1);
+EOM
+
+# Take an incremental backup.
+my $backup2path = $primary->backup_dir . '/backup2';
+$primary->command_ok(
+	[
+		'pg_basebackup',
+		'--pgdata' => $backup2path,
+		'--no-sync',
+		'--checkpoint' => 'fast',
+        '--wal-method' => 'none',
+		'--incremental' => $backup1path . '/backup_manifest'
+	],
+	"incremental backup");
+
+# Restore the incremental backup and use it to create a new node.
+my $restore = PostgreSQL::Test::Cluster->new('restore');
+$restore->init_from_backup(
+	$primary, 'backup2',
+	combine_with_prior => ['backup1'],
+	combine_mode => '--link');
+
+# Ensure files have the expected counter of hard links.
+# We expect all files to have 2 hard links in this case, except for:
+#
+# * backup_label and backup_manifest: the backups were taken at different
+#   LSNs and with different contents in the test tables.
+# * pg_attribute, pg_class and pg_statistic might have been modified by Postgres
+#   in the meantime, so they can have 1 or 2 hard links.
+# * data file of table test_3 is different because of changes in a page.
+
+# Directory to traverse
+my $restore_dir = $restore->data_dir;
+
+# Work around differences between Windows and Linux test runners.
+# The find function from the File::Find module returns paths with forward
+# slashes, while the restore->data_dir variable contains back slashes on
+# Windows. This step is just to normalize the paths, so we are able to
+# match strings later.
+my $restore_dir_normalized = $restore_dir;
+$restore_dir_normalized =~ s/\\/\//g;
+
+# Set of non-linked files (these are the ones with 1 hard link)
+my %non_linked_files = map { $_ => 1 } (
+    join('/', $restore_dir_normalized, 'backup_manifest'),
+    join('/', $restore_dir_normalized, 'backup_label')
+);
+
+# Set of linked or non-linked files (these are the ones that may be with 1 or 2
+# hard links)
+my %linked_or_non_linked_files = map { $_ => 1 } (
+    join('/', $restore_dir_normalized, $pg_attribute_path),
+    join('/', $restore_dir_normalized, $pg_class_path),
+    join('/', $restore_dir_normalized, $pg_statistic_path),
+);
+
+# By default Postgres uses 1GB segments for the data files, and our test tables
+# are 35MB worth of data each. However, that segment size is configurable, so we
+# have to handle all possibilities here. Cirrus CI e.g. is configured with 6
+# blocks per segment, and we need to cover that test case too. That's why we
+# didn't put test_3_path in the non_linked_files variable.
+my $test_3_full_path = join('/', $restore_dir_normalized, $test_3_path);
+my @test_3_segments = ();
+
+# Recursively traverse the directory
+find(sub {
+    my $file = $File::Find::name;
+
+    # Skip directories
+    return if -d $file;
+
+    # Get base name, in case it is a data file and it has more than one segment
+    # This logic is used also for non-data files, and in essence it does
+    # nothing in that case, as most of the non-data files contain no dots in the
+    # name.
+    my $basename = (split /\./, $file)[0];
+
+    if ($test_3_full_path eq $basename) {
+        # The test_3 table is a special case, because we touched a single block
+        # of the last segment of its data file. So we need to handle this case
+        # separately later.
+        push @test_3_segments, $file;
+    } else {
+        # Get the file's stat information
+        my $nlink_count = get_hard_link_count($file);
+
+        if (exists $non_linked_files{$basename}) {
+            # Non-linked files should have 1 hard link
+            ok($nlink_count == 1, "File '$file' has 1 hard link");
+        } elsif (exists $linked_or_non_linked_files{$basename}) {
+            # These files can have either 1 or 2 hard links, as they are a special case
+            ok($nlink_count == 1 || $nlink_count == 2, "File '$file' has 1 or 2 hard link");
+        } else {
+            # Other files should have 2 hard links
+            ok($nlink_count == 2, "File '$file' has 2 hard links");
+        }
+    }
+}, $restore_dir);
+
+# All segments of the data file of the table test_3 should contain 2 hard links,
+# except for the last one, where we inserted one tuple.
+@test_3_segments = sort { natural_sort ($a, $b) } @test_3_segments;
+my $last_test_3_segment = pop @test_3_segments;
+
+for my $test_3_segment (@test_3_segments) {
+    # Get the file's stat information of each segment
+    my $nlink_count = get_hard_link_count($test_3_segment);
+    ok($nlink_count == 2, "File '$test_3_segment' has 2 hard links");
+}
+
+# Get the file's stat information of the last segment
+my $nlink_count = get_hard_link_count($last_test_3_segment);
+ok($nlink_count == 1, "File '$last_test_3_segment' has 1 hard link");
+
+# OK, that's all.
+done_testing();
+
+# Natural comparison subroutine for strings with numbers
+# This is just a helper function for sorting strings with numbers (we want
+# "base/123.13" to come before "base/123.123", for example)
+sub natural_sort {
+    my ($a, $b) = @_;
+
+    # Split into non-numeric and numeric parts
+    my @parts_a = $a =~ /(\D+|\d+)/g;
+    my @parts_b = $b =~ /(\D+|\d+)/g;
+
+    for (my $i = 0; $i < scalar(@parts_a) && $i < scalar(@parts_b); $i++) {
+        if ($parts_a[$i] =~ /^\d/ && $parts_b[$i] =~ /^\d/) {
+            # Compare numerically if both parts are numbers
+            if ($parts_a[$i] < $parts_b[$i]) {
+                return -1;
+            }
+            elsif ($parts_a[$i] > $parts_b[$i]) {
+                return 1;
+            }
+        }
+        else {
+            # Compare lexicographically if not numbers
+            if ($parts_a[$i] lt $parts_b[$i]) {
+                return -1;
+            }
+            elsif ($parts_a[$i] gt $parts_b[$i]) {
+                return 1;
+            }
+        }
+    }
+
+    # If all compared parts are the same, the shorter string comes first
+    return scalar(@parts_a) <=> scalar(@parts_b);
+}
+
+
+# Subroutine to get hard-link count of a given file.
+sub get_hard_link_count {
+    my ($file) = @_;
+
+    # Get file stats
+    my @stats = stat($file);
+    my $nlink = $stats[3];  # Number of hard links
+
+    return $nlink;
+}
-- 
2.43.5

