# Craig Barratt <cbarratt@users.sourceforge.net>
#
# COPYRIGHT
-# Copyright (C) 2001-2003 Craig Barratt
+# Copyright (C) 2001-2009 Craig Barratt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#
#========================================================================
#
-# Version 2.1.0_CVS, released 8 Feb 2004.
+# Version 3.2.0, released 31 Jul 2010.
#
# See http://backuppc.sourceforge.net.
#
# Always unlink any current file in case it is already linked
#
unlink($fileName) if ( -f $fileName );
+ if ( $fileName =~ m{(.*)/.+} && !-d $1 ) {
+ my $newDir = $1;
+ eval { mkpath($newDir, 0, 0777) };
+ if ( $@ ) {
+ push(@{$self->{errors}}, "Unable to create directory $newDir for $self->{fileName}");
+ }
+ }
return $self;
}
-my $BufSize = 1048576; # 1MB or 2^20
-my $MaxFiles = 20;
+my $BufSize = 1048576; # 1MB or 2^20
+my $MaxFiles = 20; # max number of compare files open at one time
sub write
{
#
if ( !defined($dataRef) && !defined($a->{digest})
&& $a->{fileSize} != length($a->{data}) ) {
+ #my $newSize = length($a->{data});
+ #print("Fixing file size from $a->{fileSize} to $newSize\n");
$a->{fileSize} = length($a->{data});
}
if ( !defined($a->{base} = $a->{bpc}->MD52Path($a->{digest},
$a->{compress})) ) {
push(@{$a->{errors}}, "Unable to get path from '$a->{digest}'"
- . " for $a->{fileName}\n");
+ . " for $a->{fileName}");
} else {
while ( @{$a->{files}} < $MaxFiles ) {
my $fh;
my $fileName = $a->{fileCnt} < 0 ? $a->{base}
: "$a->{base}_$a->{fileCnt}";
last if ( !-f $fileName );
+ #
+ # Don't attempt to match pool files that already
+ # have too many hardlinks. Also, don't match pool
+ # files with only one link since starting in
+ # BackupPC v3.0, BackupPC_nightly could be running
+ # in parallel (and removing those files). This doesn't
+ # eliminate all possible race conditions, but just
+ # reduces the odds. Other design steps eliminate
+ # the remaining race conditions of linking vs
+ # removing.
+ #
if ( (stat(_))[3] >= $a->{hardLinkMax}
+ || (stat(_))[3] <= 1
|| !defined($fh = BackupPC::FileZIO->open($fileName, 0,
$a->{compress})) ) {
$a->{fileCnt}++;
1, $a->{compress});
if ( !defined($a->{fhOut}) ) {
push(@{$a->{errors}}, "Unable to open $a->{fileName}"
- . " for writing\n");
+ . " for writing");
}
}
}
if ( !$a->{files}[$i]->{fh}->rewind() ) {
push(@{$a->{errors}},
"Unable to rewind $a->{files}[$i]->{name}"
- . " for compare\n");
+ . " for compare");
}
$match = $a->filePartialCompare($a->{files}[$i]->{fh}, $fh,
$a->{nWrite}, $dataLen, \$a->{data});
if ( !defined($a->{fhOut}) ) {
push(@{$a->{errors}},
"Unable to open $a->{fileName}"
- . " for writing\n");
+ . " for writing");
} else {
if ( !$a->{files}[$i]->{fh}->rewind() ) {
push(@{$a->{errors}},
"Unable to rewind"
- . " $a->{files}[$i]->{name} for copy\n");
+ . " $a->{files}[$i]->{name} for copy");
}
$a->filePartialCopy($a->{files}[$i]->{fh}, $a->{fhOut},
$a->{nWrite});
my $n = $a->{fhOut}->write(\$a->{data});
if ( $n != $dataLen ) {
push(@{$a->{errors}}, "Unable to write $dataLen bytes to"
- . " $a->{fileName} (got $n)\n");
+ . " $a->{fileName} (got $n)");
}
}
$a->{nWrite} += $dataLen;
|| !defined($fh = BackupPC::FileZIO->open($fileName, 0,
$a->{compress})) ) {
push(@{$a->{errors}}, "Can't rename $a->{fileName} -> $fileName"
- . " or open during size fixup\n");
+ . " or open during size fixup");
}
+ #print("Using temporary name $fileName\n");
} elsif ( defined($a->{files}) && defined($a->{files}[0]) ) {
#
# We haven't written anything yet, so just use the
#
$fh = $a->{files}[0]->{fh};
$fh->rewind;
+ #print("Using compare file $a->{files}[0]->{name}\n");
}
if ( defined($fh) ) {
my $poolWrite = BackupPC::PoolWrite->new($a->{bpc}, $a->{fileName},
if ( $n != $thisRead ) {
push(@{$a->{errors}},
"Unable to read $thisRead bytes during resize"
- . " from temp $fileName (got $n)\n");
+ . " from temp $fileName (got $n)");
last;
}
$poolWrite->write(\$data);
}
}
- #
- # Close the compare files
- #
- foreach my $f ( @{$a->{files}} ) {
- $f->{fh}->close();
- }
-
if ( $a->{fileSize} == 0 ) {
#
# Simply create an empty file
local(*OUT);
if ( !open(OUT, ">", $a->{fileName}) ) {
push(@{$a->{errors}}, "Can't open $a->{fileName} for empty"
- . " output\n");
+ . " output");
} else {
close(OUT);
}
+ #
+ # Close the compare files
+ #
+ foreach my $f ( @{$a->{files}} ) {
+ $f->{fh}->close();
+ }
return (1, $a->{digest}, -s $a->{fileName}, $a->{errors});
} elsif ( defined($a->{fhOut}) ) {
$a->{fhOut}->close();
+ #
+ # Close the compare files
+ #
+ foreach my $f ( @{$a->{files}} ) {
+ $f->{fh}->close();
+ }
return (0, $a->{digest}, -s $a->{fileName}, $a->{errors});
} else {
if ( @{$a->{files}} == 0 ) {
push(@{$a->{errors}}, "Botch, no matches on $a->{fileName}"
- . " ($a->{digest})\n");
+ . " ($a->{digest})");
} elsif ( @{$a->{files}} > 1 ) {
#
# This is no longer a real error because $Conf{HardLinkMax}
#}
#push(@{$a->{errors}}, $str);
}
- #print(" Linking $a->{fileName} to $a->{files}[0]->{name}\n");
- if ( @{$a->{files}} && !link($a->{files}[0]->{name}, $a->{fileName}) ) {
- push(@{$a->{errors}}, "Can't link $a->{fileName} to"
- . " $a->{files}[0]->{name}\n");
+ for ( my $i = 0 ; $i < @{$a->{files}} ; $i++ ) {
+ if ( link($a->{files}[$i]->{name}, $a->{fileName}) ) {
+ #print(" Linked $a->{fileName} to $a->{files}[$i]->{name}\n");
+ #
+ # Close the compare files
+ #
+ foreach my $f ( @{$a->{files}} ) {
+ $f->{fh}->close();
+ }
+ return (1, $a->{digest}, -s $a->{fileName}, $a->{errors});
+ }
}
- return (1, $a->{digest}, -s $a->{fileName}, $a->{errors});
+ #
+ # We were unable to link to the pool. Either we're at the
+ # hardlink max, or the pool file got deleted. Recover by
+ # writing the matching file, since we still have an open
+ # handle.
+ #
+ for ( my $i = 0 ; $i < @{$a->{files}} ; $i++ ) {
+ if ( !$a->{files}[$i]->{fh}->rewind() ) {
+ push(@{$a->{errors}},
+ "Unable to rewind $a->{files}[$i]->{name}"
+ . " for copy after link fail");
+ next;
+ }
+ $a->{fhOut} = BackupPC::FileZIO->open($a->{fileName},
+ 1, $a->{compress});
+ if ( !defined($a->{fhOut}) ) {
+ push(@{$a->{errors}},
+ "Unable to open $a->{fileName}"
+ . " for writing after link fail");
+ } else {
+ $a->filePartialCopy($a->{files}[$i]->{fh}, $a->{fhOut},
+ $a->{nWrite});
+ $a->{fhOut}->close;
+ }
+ last;
+ }
+ #
+ # Close the compare files
+ #
+ foreach my $f ( @{$a->{files}} ) {
+ $f->{fh}->close();
+ }
+ return (0, $a->{digest}, -s $a->{fileName}, $a->{errors});
}
}
if ( $n != $thisRead ) {
push(@{$a->{errors}},
"Unable to read $thisRead bytes from "
- . $fhIn->name . " (got $n)\n");
+ . $fhIn->name . " (got $n)");
return;
}
$n = $fhOut->write(\$data, $thisRead);
if ( $n != $thisRead ) {
push(@{$a->{errors}},
"Unable to write $thisRead bytes to "
- . $fhOut->name . " (got $n)\n");
+ . $fhOut->name . " (got $n)");
return;
}
$nRead += $thisRead;
$n = $fh0->read(\$data0, $thisRead);
if ( $n != $thisRead ) {
push(@{$a->{errors}}, "Unable to read $thisRead bytes from "
- . $fh0->name . " (got $n)\n");
+ . $fh0->name . " (got $n)");
return;
}
$n = $fh1->read(\$data1, $thisRead);
return 1;
}
+#
+# LinkOrCopy() does a hardlink from oldFile to newFile.
+#
+# If that fails (because there are too many links on oldFile)
+# then oldFile is copied to newFile, and the pool stats are
+# returned to be added to the new file list. That allows
+# BackupPC_link to try again, and to create a new pool file
+# if necessary.
+#
+# Returns ($exists, $digest, $origFileSize, $outSize, $errs);
+# the link-success path returns just (1, undef).
+#
+sub LinkOrCopy
+{
+ my($bpc, $oldFile, $oldFileComp, $newFile, $newFileComp) = @_;
+ my $nRead = 0; # uncompressed byte count; start at 0 so an
+ # empty file yields size 0, not undef
+ my $data;
+
+ unlink($newFile) if ( -f $newFile );
+ #
+ # Try to link if hardlink limit is ok, and compression types
+ # are the same. (!$x == !$y compares the two compression
+ # settings as booleans, deliberately.)
+ #
+ return (1, undef) if ( (stat($oldFile))[3] < $bpc->{Conf}{HardLinkMax}
+ && !$oldFileComp == !$newFileComp
+ && link($oldFile, $newFile) );
+ #
+ # There are too many links on oldFile, or compression
+ # type is different, so now we have to copy it.
+ #
+ # We need to compute the file size, which is expensive
+ # since we need to read the file twice. That's probably
+ # ok since the hardlink limit is rarely hit.
+ #
+ my $readFd = BackupPC::FileZIO->open($oldFile, 0, $oldFileComp);
+ if ( !defined($readFd) ) {
+ return (0, undef, undef, undef, ["LinkOrCopy: can't open $oldFile"]);
+ }
+ while ( $readFd->read(\$data, $BufSize) > 0 ) {
+ $nRead += length($data);
+ }
+ $readFd->rewind();
+
+ my $poolWrite = BackupPC::PoolWrite->new($bpc, $newFile,
+ $nRead, $newFileComp);
+ while ( $readFd->read(\$data, $BufSize) > 0 ) {
+ $poolWrite->write(\$data);
+ }
+ my($exists, $digest, $outSize, $errs) = $poolWrite->close;
+
+ return ($exists, $digest, $nRead, $outSize, $errs);
+}
+
+
1;