X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=blobdiff_plain;f=lib%2FBackupPC%2FPoolWrite.pm;h=415c67f40776eda0e87412ad44bbc1ed6ded861c;hp=2cfd8dca5b9a2700a98977371a672124eb03b582;hb=5b79f9a3c01bca16dd4d211e76fc53daa549e421;hpb=79e0593c501fd58046feeca20c315cc4ac283435

diff --git a/lib/BackupPC/PoolWrite.pm b/lib/BackupPC/PoolWrite.pm
index 2cfd8dc..415c67f 100644
--- a/lib/BackupPC/PoolWrite.pm
+++ b/lib/BackupPC/PoolWrite.pm
@@ -38,7 +38,7 @@
 # Craig Barratt
 #
 # COPYRIGHT
-# Copyright (C) 2001-2003 Craig Barratt
+# Copyright (C) 2001-2007 Craig Barratt
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -56,7 +56,7 @@
 #
 #========================================================================
 #
-# Version 2.1.0beta2, released 23 May 2004.
+# Version 3.1.0, released 25 Nov 2007.
 #
 # See http://backuppc.sourceforge.net.
 #
@@ -142,7 +142,19 @@ sub write
         my $fileName = $a->{fileCnt} < 0 ? $a->{base}
                                          : "$a->{base}_$a->{fileCnt}";
         last if ( !-f $fileName );
+        #
+        # Don't attempt to match pool files that already
+        # have too many hardlinks.  Also, don't match pool
+        # files with only one link since starting in
+        # BackupPC v3.0, BackupPC_nightly could be running
+        # in parallel (and removing those files).  This doesn't
+        # eliminate all possible race conditions, but just
+        # reduces the odds.  Other design steps eliminate
+        # the remaining race conditions of linking vs
+        # removing.
+        #
         if ( (stat(_))[3] >= $a->{hardLinkMax}
+                || (stat(_))[3] <= 1
                 || !defined($fh = BackupPC::FileZIO->open($fileName, 0,
                                                           $a->{compress})) ) {
             $a->{fileCnt}++;
@@ -352,13 +364,6 @@ sub write
             }
         }
 
-        #
-        # Close the compare files
-        #
-        foreach my $f ( @{$a->{files}} ) {
-            $f->{fh}->close();
-        }
-
         if ( $a->{fileSize} == 0 ) {
             #
             # Simply create an empty file
@@ -370,9 +375,21 @@ sub write
         } else {
             close(OUT);
         }
+        #
+        # Close the compare files
+        #
+        foreach my $f ( @{$a->{files}} ) {
+            $f->{fh}->close();
+        }
         return (1, $a->{digest}, -s $a->{fileName}, $a->{errors});
     } elsif ( defined($a->{fhOut}) ) {
         $a->{fhOut}->close();
+        #
+        # Close the compare files
+        #
+        foreach my $f ( @{$a->{files}} ) {
+            $f->{fh}->close();
+        }
         return (0, $a->{digest}, -s $a->{fileName}, $a->{errors});
     } else {
         if ( @{$a->{files}} == 0 ) {
@@ -390,12 +407,51 @@ sub write
             #}
             #push(@{$a->{errors}}, $str);
         }
-        #print("  Linking $a->{fileName} to $a->{files}[0]->{name}\n");
-        if ( @{$a->{files}} && !link($a->{files}[0]->{name}, $a->{fileName}) ) {
-            push(@{$a->{errors}}, "Can't link $a->{fileName} to"
-                                . " $a->{files}[0]->{name}\n");
+        for ( my $i = 0 ; $i < @{$a->{files}} ; $i++ ) {
+            if ( link($a->{files}[$i]->{name}, $a->{fileName}) ) {
+                #print("  Linked $a->{fileName} to $a->{files}[$i]->{name}\n");
+                #
+                # Close the compare files
+                #
+                foreach my $f ( @{$a->{files}} ) {
+                    $f->{fh}->close();
+                }
+                return (1, $a->{digest}, -s $a->{fileName}, $a->{errors});
+            }
         }
-        return (1, $a->{digest}, -s $a->{fileName}, $a->{errors});
+        #
+        # We were unable to link to the pool.  Either we're at the
+        # hardlink max, or the pool file got deleted.  Recover by
+        # writing the matching file, since we still have an open
+        # handle.
+        #
+        for ( my $i = 0 ; $i < @{$a->{files}} ; $i++ ) {
+            if ( !$a->{files}[$i]->{fh}->rewind() ) {
+                push(@{$a->{errors}},
+                     "Unable to rewind $a->{files}[$i]->{name}"
+                   . " for copy after link fail\n");
+                next;
+            }
+            $a->{fhOut} = BackupPC::FileZIO->open($a->{fileName},
+                                                  1, $a->{compress});
+            if ( !defined($a->{fhOut}) ) {
+                push(@{$a->{errors}},
+                     "Unable to open $a->{fileName}"
+                   . " for writing after link fail\n");
+            } else {
+                $a->filePartialCopy($a->{files}[$i]->{fh}, $a->{fhOut},
+                                    $a->{nWrite});
+                $a->{fhOut}->close;
+            }
+            last;
+        }
+        #
+        # Close the compare files
+        #
+        foreach my $f ( @{$a->{files}} ) {
+            $f->{fh}->close();
+        }
+        return (0, $a->{digest}, -s $a->{fileName}, $a->{errors});
     }
 }
@@ -494,4 +550,53 @@ sub filePartialCompare
     return 1;
 }
 
+#
+# LinkOrCopy() does a hardlink from oldFile to newFile.
+#
+# If that fails (because there are too many links on oldFile)
+# then oldFile is copied to newFile, and the pool stats are
+# returned to be added to the new file list.  That allows
+# BackupPC_link to try again, and to create a new pool file
+# if necessary.
+#
+sub LinkOrCopy
+{
+    my($bpc, $oldFile, $oldFileComp, $newFile, $newFileComp) = @_;
+    my($nRead, $data);
+
+    unlink($newFile)  if ( -f $newFile );
+    #
+    # Try to link if the hardlink limit is ok, and the compression
+    # types are the same
+    #
+    return (1, undef) if ( (stat($oldFile))[3] < $bpc->{Conf}{HardLinkMax}
+                            && !$oldFileComp == !$newFileComp
+                            && link($oldFile, $newFile) );
+    #
+    # There are too many links on oldFile, or the compression
+    # type is different, so now we have to copy it.
+    #
+    # We need to compute the file size, which is expensive
+    # since we need to read the file twice.  That's probably
+    # ok since the hardlink limit is rarely hit.
+    #
+    my $readFd = BackupPC::FileZIO->open($oldFile, 0, $oldFileComp);
+    if ( !defined($readFd) ) {
+        return (0, undef, undef, undef, ["LinkOrCopy: can't open $oldFile"]);
+    }
+    while ( $readFd->read(\$data, $BufSize) > 0 ) {
+        $nRead += length($data);
+    }
+    $readFd->rewind();
+
+    my $poolWrite = BackupPC::PoolWrite->new($bpc, $newFile,
+                                             $nRead, $newFileComp);
+    while ( $readFd->read(\$data, $BufSize) > 0 ) {
+        $poolWrite->write(\$data);
+    }
+    my($exists, $digest, $outSize, $errs) = $poolWrite->close;
+
+    return ($exists, $digest, $nRead, $outSize, $errs);
+}
+
 1;