--- Begin Message ---
Package: backuppc
Version: 2.1.2-5
Severity: normal
Tags: patch
Hi,
backuppc recently managed to overflow $conf{MaxHardLinks} (31999)
and shortly later reached the system limit (64535) throwing tons of
errors: "Unable to link ..."
Checking the upstream CVS I found that this problem has already
been fixed in 3.0.0alpha. I backported these changes and have been using
these modifications for a week without link errors or other problems.
A new subroutine LinkOrCopy() has been added and is used instead of
plain link(). The patch is attached.
Andreas
-- System Information:
Debian Release: testing/unstable
APT prefers testing
APT policy: (900, 'testing'), (900, 'stable'), (600, 'unstable')
Architecture: i386 (i686)
Shell: /bin/sh linked to /bin/bash
Kernel: Linux 2.6.16-1-k7
Locale: LANG=C, LC_CTYPE=en_US.UTF-8 (charmap=UTF-8)
Versions of packages backuppc depends on:
ii adduser 3.87 Add and remove users and groups
ii apache 1.3.34-2 versatile, high-performance HTTP s
ii apache-ssl 1.3.34-2 versatile, high-performance HTTP s
ii apache2-mpm-prefork [apache 2.0.55-4 traditional model for Apache2
ii debconf [debconf-2.0] 1.5.1 Debian configuration management sy
ii dpkg 1.13.19 package maintenance system for Deb
ii exim4 4.44-2 metapackage to ease exim MTA (v4)
ii exim4-daemon-heavy [mail-tr 4.44-2 exim MTA (v4) daemon with extended
ii libarchive-zip-perl 1.16-1 Module for manipulation of ZIP arc
ii libcompress-zlib-perl 1.41-1 Perl module for creation and manip
ii perl [libdigest-md5-perl] 5.8.8-4 Larry Wall's Practical Extraction
ii perl-suid 5.8.8-4 Runs setuid Perl scripts
ii samba-common 3.0.22-1 Samba common files used by both th
ii smbclient 3.0.22-1 a LanManager-like simple client fo
ii tar 1.15.1dfsg-3 GNU tar
ii wwwconfig-common 0.0.45 Debian web auto configuration
backuppc recommends no packages.
-- debconf information excluded
Index: backuppc-2.1.2/bin/BackupPC_dump
===================================================================
--- backuppc-2.1.2.orig/bin/BackupPC_dump 2006-07-02 18:15:29.000000000
+0200
+++ backuppc-2.1.2/bin/BackupPC_dump 2006-07-02 18:25:04.000000000 +0200
@@ -1241,6 +1241,7 @@
{
my @Backups = $bpc->BackupInfoRead($client);
my $num = -1;
+ my $newFilesFH;
#
# Since we got a good backup we should remove any partial dumps
@@ -1309,11 +1310,34 @@
$file = "$f->{share}/$f->{file}";
}
next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
- if ( !link("$Dir/$Backups[$j]{num}/$file",
- "$Dir/$num/$shareM/$fileM") ) {
- my $str = "Unable to link $num/$f->{share}/$f->{file} to"
- . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
- $XferLOG->write(\$str);
+
+ my($exists, $digest, $origSize, $outSize, $errs)
+ = BackupPC::PoolWrite::LinkOrCopy(
+ $bpc,
+ "$Dir/$Backups[$j]{num}/$file",
+ $Backups[$j]{compress},
+ "$Dir/$num/$shareM/$fileM",
+ $Conf{CompressLevel});
+ if ( !$exists ) {
+ #
+ # the hard link failed, most likely because the target
+ # file has too many links. We have copied the file
+ # instead, so add this to the new file list.
+ #
+ if ( !defined($newFilesFH) ) {
+ my $str = "Appending to NewFileList for $shareM/$fileM\n";
+ $XferLOG->write(\$str);
+ open($newFilesFH, ">>", "$TopDir/pc/$client/NewFileList")
+ || die("can't open $TopDir/pc/$client/NewFileList");
+ binmode($newFilesFH);
+ }
+ if ( -f "$Dir/$num/$shareM/$fileM" ) {
+ print($newFilesFH "$digest $origSize $shareM/$fileM\n");
+ } else {
+ my $str = "Unable to link/copy $num/$f->{share}/$f->{file}"
+ . " to $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+ $XferLOG->write(\$str);
+ }
} else {
my $str = "Bad file $num/$f->{share}/$f->{file} replaced"
. " by link to"
@@ -1328,6 +1352,7 @@
$XferLOG->write(\$str);
}
}
+ close($newFilesFH) if ( defined($newFilesFH) );
$XferLOG->close();
rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$num$fileExt");
rename("$Dir/NewFileList", "$Dir/NewFileList.$num");
Index: backuppc-2.1.2/bin/BackupPC_link
===================================================================
--- backuppc-2.1.2.orig/bin/BackupPC_link 2006-07-02 18:15:38.000000000
+0200
+++ backuppc-2.1.2/bin/BackupPC_link 2006-07-02 18:27:01.000000000 +0200
@@ -211,7 +211,21 @@
#
# Exists in the older filled backup, and not in the new, so link it
#
- link($name, $newName);
+ my($exists, $digest, $origSize, $outSize, $errs)
+ = BackupPC::PoolWrite::LinkOrCopy(
+ $bpc,
+ $name, $Compress,
+ $newName, $Compress);
+ if ( $exists ) {
+ $nFilesExist++;
+ $sizeExist += $origSize;
+ $sizeExistComp += $outSize;
+ } elsif ( $outSize > 0 ) {
+ $nFilesNew++;
+ $sizeNew += $origSize;
+ $sizeNewComp += $outSize;
+ LinkNewFile($digest, $origSize, $newName);
+ }
}
}
Index: backuppc-2.1.2/lib/BackupPC/PoolWrite.pm
===================================================================
--- backuppc-2.1.2.orig/lib/BackupPC/PoolWrite.pm 2006-07-02
18:13:38.000000000 +0200
+++ backuppc-2.1.2/lib/BackupPC/PoolWrite.pm 2006-07-02 18:14:40.000000000
+0200
@@ -494,4 +494,53 @@
return 1;
}
+#
+# LinkOrCopy() does a hardlink from oldFile to newFile.
+#
+# If that fails (because there are too many links on oldFile)
+# then oldFile is copied to newFile, and the pool stats are
+# returned to be added to the new file list. That allows
+# BackupPC_link to try again, and to create a new pool file
+# if necessary.
+#
+sub LinkOrCopy
+{
+ my($bpc, $oldFile, $oldFileComp, $newFile, $newFileComp) = @_;
+ my($nRead, $data);
+
+ unlink($newFile) if ( -f $newFile );
+ #
+ # Try to link if hardlink limit is ok, and compression types
+ # are the same
+ #
+ return (1, undef) if ( (stat($oldFile))[3] < $bpc->{Conf}{HardLinkMax}
+ && !$oldFileComp == !$newFileComp
+ && link($oldFile, $newFile) );
+ #
+ # There are too many links on oldFile, or compression
+# type is different, so now we have to copy it.
+ #
+ # We need to compute the file size, which is expensive
+ # since we need to read the file twice. That's probably
+ # ok since the hardlink limit is rarely hit.
+ #
+ my $readFd = BackupPC::FileZIO->open($oldFile, 0, $oldFileComp);
+ if ( !defined($readFd) ) {
+ return (0, undef, undef, undef, ["LinkOrCopy: can't open $oldFile"]);
+ }
+ while ( $readFd->read(\$data, $BufSize) > 0 ) {
+ $nRead += length($data);
+ }
+ $readFd->rewind();
+
+ my $poolWrite = BackupPC::PoolWrite->new($bpc, $newFile,
+ $nRead, $newFileComp);
+ while ( $readFd->read(\$data, $BufSize) > 0 ) {
+ $poolWrite->write(\$data);
+ }
+ my($exists, $digest, $outSize, $errs) = $poolWrite->close;
+
+ return ($exists, $digest, $nRead, $outSize, $errs);
+}
+
1;
Index: backuppc-2.1.2/lib/BackupPC/Xfer/RsyncFileIO.pm
===================================================================
--- backuppc-2.1.2.orig/lib/BackupPC/Xfer/RsyncFileIO.pm 2006-07-02
18:15:48.000000000 +0200
+++ backuppc-2.1.2/lib/BackupPC/Xfer/RsyncFileIO.pm 2006-07-02
18:18:40.000000000 +0200
@@ -1043,22 +1043,38 @@
#
my $rxOutFile = $fio->{outDirSh}
. $fio->{bpc}->fileNameMangle($name);
- if ( !link($attr->{fullPath}, $rxOutFile) ) {
- $fio->log("Unable to link $attr->{fullPath} to $rxOutFile");
- $fio->{stats}{errorCnt}++;
- $ret = -1;
- } else {
- #
- # Cumulate the stats
- #
- $fio->{stats}{TotalFileCnt}++;
- $fio->{stats}{TotalFileSize} += $fio->{rxSize};
- $fio->{stats}{ExistFileCnt}++;
- $fio->{stats}{ExistFileSize} += $fio->{rxSize};
- $fio->{stats}{ExistFileCompSize} += -s $rxOutFile;
- $fio->{rxFile}{size} = $fio->{rxSize};
- $ret = $fio->attribSet($fio->{rxFile});
- }
+ my($exists, $digest, $origSize, $outSize, $errs)
+ = BackupPC::PoolWrite::LinkOrCopy(
+ $fio->{bpc},
+ $attr->{fullPath},
+ $attr->{compress},
+ $rxOutFile,
+ $fio->{xfer}{compress});
+ #
+ # Cumulate the stats
+ #
+ $fio->{stats}{TotalFileCnt}++;
+ $fio->{stats}{TotalFileSize} += $fio->{rxSize};
+ $fio->{stats}{ExistFileCnt}++;
+ $fio->{stats}{ExistFileSize} += $fio->{rxSize};
+ $fio->{stats}{ExistFileCompSize} += -s $rxOutFile;
+ $fio->{rxFile}{size} = $fio->{rxSize};
+ $ret = $fio->attribSet($fio->{rxFile});
+ $fio->log(@$errs) if ( defined($errs) && @$errs );
+
+ if ( !$exists && $outSize > 0 ) {
+ #
+ # the hard link failed, most likely because the target
+ # file has too many links. We have copied the file
+ # instead, so add this to the new file list.
+ #
+ my $rxOutFileRel = "$fio->{shareM}/"
+ . $fio->{bpc}->fileNameMangle($name);
+ $rxOutFileRel =~ s{^/+}{};
+ my $fh = $fio->{newFilesFH};
+ print($fh "$digest $origSize $rxOutFileRel\n")
+ if ( defined($fh) );
+ }
}
} else {
my $exist = $fio->processClose($fio->{rxOutFd},
--- End Message ---