# Craig Barratt <cbarratt@users.sourceforge.net>
#
# COPYRIGHT
-# Copyright (C) 2001 Craig Barratt
+# Copyright (C) 2001-2003 Craig Barratt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#
#========================================================================
#
-# Version 2.0.0_CVS, released 3 Feb 2003.
+# Version 3.0.0alpha, released 23 Jan 2006.
#
# See http://backuppc.sourceforge.net.
#
return $self;
}
-my $BufSize = 1048576; # 1MB or 2^20
-my $MaxFiles = 20;
+my $BufSize = 1048576; # 1MB or 2^20
+my $MaxFiles = 20; # max number of compare files open at one time
sub write
{
return if ( $a->{eof} );
$a->{data} .= $$dataRef if ( defined($dataRef) );
return if ( length($a->{data}) < $BufSize && defined($dataRef) );
- if ( !defined($a->{digest}) && $a->{fileSize} > 0 ) {
+
+ #
+ # Correct the fileSize if it is wrong (rsync might transfer
+ # a file whose length is different to the length sent with the
+ # file list if the file changes between the file list sending
+ # and the file sending). Here we only catch the case where
+ # we haven't computed the digest (ie: we have written no more
+ # than $BufSize). We catch the big file case below.
+ #
+ if ( !defined($dataRef) && !defined($a->{digest})
+ && $a->{fileSize} != length($a->{data}) ) {
+ #my $newSize = length($a->{data});
+ #print("Fixing file size from $a->{fileSize} to $newSize\n");
+ $a->{fileSize} = length($a->{data});
+ }
+
+ if ( !defined($a->{digest}) && length($a->{data}) > 0 ) {
#
# build a list of all the candidate matching files
#
my $md5 = Digest::MD5->new;
+ $a->{fileSize} = length($a->{data})
+ if ( $a->{fileSize} < length($a->{data}) );
$a->{digest} = $a->{bpc}->Buffer2MD5($md5, $a->{fileSize}, \$a->{data});
if ( !defined($a->{base} = $a->{bpc}->MD52Path($a->{digest},
$a->{compress})) ) {
}
}
my $dataLen = length($a->{data});
- if ( !defined($a->{fhOut}) && $a->{fileSize} > 0 ) {
+ if ( !defined($a->{fhOut}) && length($a->{data}) > 0 ) {
#
# See if the new chunk of data continues to match the
# candidate files.
# We are at EOF, so finish up
#
$a->{eof} = 1;
+
+    #
+    # Make sure the fileSize was correct: rsync may transfer a file
+    # whose actual length differs from the length sent in the file
+    # list (the file changed between list time and send time).
+    #
+    if ( $a->{nWrite} != $a->{fileSize} ) {
+        #
+        # Oops, fileSize was wrong, so our MD5 digest was wrong and our
+        # effort to match files likely failed.  This is ugly, but our
+        # only choice at this point is to re-write the entire file with
+        # the correct length.  We need to rename the file, open it for
+        # reading, and then re-write the file with the correct length.
+        #
+
+        #print("Doing big file fixup ($a->{fileSize} != $a->{nWrite})\n");
+
+        my($fh, $fileName);
+        $a->{fileSize} = $a->{nWrite};
+
+        if ( defined($a->{fhOut}) ) {
+            if ( $a->{fileName} =~ /(.*)\// ) {
+                $fileName = $1;
+            } else {
+                $fileName = ".";
+            }
+            #
+            # Find a unique target temporary file name
+            #
+            my $i = 0;
+            while ( -f "$fileName/t$$.$i" ) {
+                $i++;
+            }
+            $fileName = "$fileName/t$$.$i";
+            $a->{fhOut}->close();
+            if ( !rename($a->{fileName}, $fileName)
+              || !defined($fh = BackupPC::FileZIO->open($fileName, 0,
+                                                 $a->{compress})) ) {
+                push(@{$a->{errors}}, "Can't rename $a->{fileName} -> $fileName"
+                                    . " or open during size fixup\n");
+            }
+            #print("Using temporary name $fileName\n");
+        } elsif ( defined($a->{files}) && defined($a->{files}[0]) ) {
+            #
+            # We haven't written anything yet, so just use the
+            # compare file to copy from.
+            #
+            $fh = $a->{files}[0]->{fh};
+            $fh->rewind;
+            #print("Using compare file $a->{files}[0]->{name}\n");
+        }
+        #
+        # Re-feed all $a->{fileSize} bytes through a brand-new PoolWrite
+        # so the MD5 digest and pool-candidate matching are redone with
+        # the correct length; the nested close() result becomes ours.
+        #
+        if ( defined($fh) ) {
+            my $poolWrite = BackupPC::PoolWrite->new($a->{bpc}, $a->{fileName},
+                                        $a->{fileSize}, $a->{compress});
+            my $nRead = 0;
+
+            while ( $nRead < $a->{fileSize} ) {
+                my $thisRead = $a->{fileSize} - $nRead < $BufSize
+                             ? $a->{fileSize} - $nRead : $BufSize;
+                my $data;
+                my $n = $fh->read(\$data, $thisRead);
+                if ( $n != $thisRead ) {
+                    push(@{$a->{errors}},
+                            "Unable to read $thisRead bytes during resize"
+                           . " from temp $fileName (got $n)\n");
+                    last;
+                }
+                $poolWrite->write(\$data);
+                $nRead += $thisRead;
+            }
+            $fh->close;
+            unlink($fileName) if ( defined($fileName) );
+            if ( @{$a->{errors}} ) {
+                $poolWrite->close;
+                return (0, $a->{digest}, -s $a->{fileName}, $a->{errors});
+            } else {
+                return $poolWrite->close;
+            }
+        }
+    }
+
+ #
+ # Close the compare files
+ #
foreach my $f ( @{$a->{files}} ) {
$f->{fh}->close();
}
+
if ( $a->{fileSize} == 0 ) {
#
# Simply create an empty file
return $a->write(undef);
}
+#
+# Abort a pool write: close and unlink any partially-written output
+# file, close every open compare (candidate match) file, and clear
+# the compare file list.  No value is returned; the object should
+# not be used for further writes afterwards.
+#
+sub abort
+{
+    my($a) = @_;
+
+    if ( defined($a->{fhOut}) ) {
+        $a->{fhOut}->close();
+        unlink($a->{fileName});
+    }
+    foreach my $f ( @{$a->{files}} ) {
+        $f->{fh}->close();
+    }
+    $a->{files} = [];
+}
+
#
# Copy $nBytes from files $fhIn to $fhOut.
#
return 1;
}
+#
+# LinkOrCopy() does a hardlink from oldFile to newFile.
+#
+# If that fails (because there are too many links on oldFile)
+# then oldFile is copied to newFile, and the pool stats are
+# returned to be added to the new file list. That allows
+# BackupPC_link to try again, and to create a new pool file
+# if necessary.
+#
+# Returns (1, undef) on a successful hardlink; otherwise returns
+# (exists, digest, origFileSize, outputFileSize, errorList) from
+# the PoolWrite used to copy the data.
+#
+sub LinkOrCopy
+{
+    my($bpc, $oldFile, $oldFileComp, $newFile, $newFileComp) = @_;
+    my $data;
+    my $nRead = 0;      # total uncompressed bytes in $oldFile
+
+    unlink($newFile) if ( -f $newFile );
+    #
+    # Try to link if hardlink limit is ok, and compression types
+    # are the same.  (stat())[3] is the hardlink count; the !..==!..
+    # test compares the two compression settings as booleans.
+    #
+    return (1, undef) if ( (stat($oldFile))[3] < $bpc->{Conf}{HardLinkMax}
+                            && !$oldFileComp == !$newFileComp
+                            && link($oldFile, $newFile) );
+    #
+    # There are too many links on oldFile, or compression
+    # type is different, so now we have to copy it.
+    #
+    # We need to compute the file size, which is expensive
+    # since we need to read the file twice. That's probably
+    # ok since the hardlink limit is rarely hit.
+    #
+    my $readFd = BackupPC::FileZIO->open($oldFile, 0, $oldFileComp);
+    if ( !defined($readFd) ) {
+        return (0, undef, undef, undef, ["LinkOrCopy: can't open $oldFile"]);
+    }
+    while ( $readFd->read(\$data, $BufSize) > 0 ) {
+        $nRead += length($data);
+    }
+    $readFd->rewind();
+
+    my $poolWrite = BackupPC::PoolWrite->new($bpc, $newFile,
+                                             $nRead, $newFileComp);
+    while ( $readFd->read(\$data, $BufSize) > 0 ) {
+        $poolWrite->write(\$data);
+    }
+    my($exists, $digest, $outSize, $errs) = $poolWrite->close;
+
+    return ($exists, $digest, $nRead, $outSize, $errs);
+}
+
1;