# Craig Barratt <cbarratt@users.sourceforge.net>
#
# COPYRIGHT
-# Copyright (C) 2002 Craig Barratt
+# Copyright (C) 2002-2003 Craig Barratt
#
#========================================================================
#
-# Version 2.0.0_CVS, released 3 Feb 2003.
+# Version 2.1.0, released 20 Jun 2004.
#
# See http://backuppc.sourceforge.net.
#
use File::Path;
use BackupPC::Attrib qw(:all);
use BackupPC::View;
+use BackupPC::Xfer::RsyncDigest qw(:all);
use BackupPC::PoolWrite;
-use BackupPC::PoolWrite;
-use Data::Dumper;
use constant S_IFMT => 0170000; # type of file
use constant S_IFDIR => 0040000; # directory
attrib => {},
logHandler => \&logHandler,
stats => {
+ errorCnt => 0,
TotalFileCnt => 0,
TotalFileSize => 0,
ExistFileCnt => 0,
$fio->{backups});
$fio->{full} = $fio->{xfer}{type} eq "full" ? 1 : 0;
$fio->{newFilesFH} = $fio->{xfer}{newFilesFH};
+ $fio->{partialNum} = undef if ( !$fio->{full} );
return $fio;
}
{
my($fio, $sub) = @_;
$fio->{logHandler} = $sub;
+ BackupPC::Xfer::RsyncDigest->logHandlerSet($sub);
}
#
#
#
# Start the rsync block-checksum (and optional whole-file MD4) digest
# computation for file $f.  Returns the block size on success, or -1
# on error (missing/non-plain file, or digestStart failure).
#
# $defBlkSize is the fallback block size when no cached digest dictates
# one; $phase is the rsync phase (0 = first pass with short checksums,
# >0 = retry pass with full checksums).
#
sub csumStart
{
    my($fio, $f, $needMD4, $defBlkSize, $phase) = @_;

    $defBlkSize ||= $fio->{blockSize};
    $phase      ||= 0;          # callers may omit; avoid undef warnings below
    my $attr = $fio->attribGet($f);
    $fio->{file} = $f;
    $fio->csumEnd if ( defined($fio->{csum}) );
    return -1 if ( !defined($attr) || $attr->{type} != BPC_FTYPE_FILE );
    #
    # Rsync uses short checksums on the first phase.  If the whole-file
    # checksum fails, then the file is repeated with full checksums.
    # So on phase 2 we verify the checksums if they are cached.
    #
    if ( ($phase > 0 || rand(1) < $fio->{cacheCheckProb})
            && $attr->{compress}
            && $fio->{checksumSeed} == RSYNC_CSUMSEED_CACHE ) {
        my($err, $d, $blkSize) = BackupPC::Xfer::RsyncDigest->digestStart(
                                    $attr->{fullPath}, $attr->{size}, 0,
                                    $defBlkSize, $fio->{checksumSeed},
                                    0, $attr->{compress}, 0);
        #
        # Only verify when digestStart succeeded: $d is unusable on error.
        #
        if ( !$err && defined($d) ) {
            my($isCached, $isInvalid) = $d->isCached;
            if ( $fio->{logLevel} >= 5 ) {
                $fio->log("$attr->{fullPath} verify; cached = $isCached,"
                        . " invalid = $isInvalid, phase = $phase");
            }
            if ( $isCached || $isInvalid ) {
                my $ret = BackupPC::Xfer::RsyncDigest->digestAdd(
                                $attr->{fullPath}, $blkSize,
                                $fio->{checksumSeed}, 1        # verify
                          );
                if ( $ret != 1 ) {
                    $fio->log("Bad cached digest for $attr->{fullPath}"
                            . " ($ret); fixed");
                    $fio->{stats}{errorCnt}++;
                } else {
                    $fio->log("$f->{name}: verified cached digest")
                                    if ( $fio->{logLevel} >= 2 );
                }
            }
            $d->digestEnd;
        }
    }
    (my $err, $fio->{csum}, my $blkSize)
         = BackupPC::Xfer::RsyncDigest->digestStart($attr->{fullPath},
                         $attr->{size}, 0, $defBlkSize, $fio->{checksumSeed},
                         $needMD4, $attr->{compress}, 1);
    #
    # Check the error BEFORE touching $fio->{csum}: on failure the digest
    # object is undefined and calling isCached on it would die.
    #
    if ( $err ) {
        $fio->log("Can't get rsync digests from $attr->{fullPath}"
                . " (err=$err, name=$f->{name})");
        $fio->{stats}{errorCnt}++;
        return -1;
    }
    if ( $fio->{logLevel} >= 5 ) {
        my($isCached, $invalid) = $fio->{csum}->isCached;
        $fio->log("$attr->{fullPath} cache = $isCached,"
                . " invalid = $invalid, phase = $phase");
    }
    return $blkSize;
}
sub csumGet
$num ||= 100;
$csumLen ||= 16;
-
- return if ( !defined($fio->{fh}) );
- if ( $fio->{fh}->read(\$fileData, $blockSize * $num) <= 0 ) {
- return;
- }
- $fio->{csumDigest}->add($fileData) if ( defined($fio->{csumDigest}) );
- $fio->log(sprintf("%s: getting csum ($num,$csumLen,%d,0x%x)\n",
- $fio->{file}{name},
- length($fileData),
- $fio->{checksumSeed}))
- if ( $fio->{logLevel} >= 10 );
- return $fio->{digest}->blockDigest($fileData, $blockSize,
- $csumLen, $fio->{checksumSeed});
+ return if ( !defined($fio->{csum}) );
+ return $fio->{csum}->digestGet($num, $csumLen);
}
#
# Finish the digest computation started by csumStart.  Returns the
# whole-file MD4 digest when one was requested, otherwise whatever
# digestEnd returns; returns undef when no digest is in progress.
#
sub csumEnd
{
    my($fio) = @_;

    # Nothing to do when csumStart never ran (or already finished).
    return if ( !defined($fio->{csum}) );
    return $fio->{csum}->digestEnd();
}
sub readStart
0,
$attr->{compress})) ) {
$fio->log("Can't open $attr->{fullPath} (name=$f->{name})");
+ $fio->{stats}{errorCnt}++;
return;
}
$fio->log("$f->{name}: opened for read") if ( $fio->{logLevel} >= 4 );
my($fio, $checksumSeed) = @_;
$fio->{checksumSeed} = $checksumSeed;
+ $fio->log("Checksum caching enabled (checksumSeed = $checksumSeed)")
+ if ( $fio->{logLevel} >= 1 && $checksumSeed == RSYNC_CSUMSEED_CACHE );
+ $fio->log("Checksum seed is $checksumSeed")
+ if ( $fio->{logLevel} >= 2 && $checksumSeed != RSYNC_CSUMSEED_CACHE );
}
sub dirs
#
$fio->{viewCache}{$shareM}
= $fio->{view}->dirAttrib($fio->{viewNum}, $share, $dir);
+ #
+ # also cache partial backup attrib data too
+ #
+ if ( defined($fio->{partialNum}) ) {
+ foreach my $d ( keys(%{$fio->{partialCache}}) ) {
+ delete($fio->{partialCache}{$d}) if ( $shareM !~ m{^\Q$d/} );
+ }
+ $fio->{partialCache}{$shareM}
+ = $fio->{view}->dirAttrib($fio->{partialNum}, $share, $dir);
+ }
}
-sub attribGet
+sub attribGetWhere
{
my($fio, $f) = @_;
my($dir, $fname, $share, $shareM);
}
$fio->viewCacheDir($share, $dir);
$shareM .= "/$dir" if ( $dir ne "" );
- return $fio->{viewCache}{$shareM}{$fname};
+ if ( defined(my $attr = $fio->{viewCache}{$shareM}{$fname}) ) {
+ return ($attr, 0);
+ } elsif ( defined(my $attr = $fio->{partialCache}{$shareM}{$fname}) ) {
+ return ($attr, 1);
+ } else {
+ return;
+ }
+}
+
#
# Return just the attribute hash for file $f (or undef when the file
# is in neither the view cache nor the partial-backup cache).  Thin
# wrapper around attribGetWhere that discards the "where" flag.
#
sub attribGet
{
    my($fio, $f) = @_;

    my($attr) = $fio->attribGetWhere($f);
    return $attr;
}
sub mode2type
return if ( !defined($fio->{attrib}{$d}) );
#
# Set deleted files in the attributes. Any file in the view
- # that doesn't have attributes is deleted. All files sent by
- # rsync have attributes temporarily set so we can do deletion
- # detection. We also prune these temporary attributes.
+ # that doesn't have attributes is flagged as deleted for
+ # incremental dumps. All files sent by rsync have attributes
+ # temporarily set so we can do deletion detection. We also
+ # prune these temporary attributes.
#
if ( $d ne "" ) {
my $dir;
name => $name,
}) if ( $fio->{logLevel} >= 2 );
}
- } else {
+ } elsif ( !$fio->{full} ) {
##print("Delete file $f\n");
$fio->logFileAction("delete", {
%{$fio->{viewCache}{$d}{$f}},
File::Path::mkpath($path, 0, 0777) if ( !-d $path );
return $fio->attribSet($f) if ( -d $path );
$fio->log("Can't create directory $path");
+ $fio->{stats}{errorCnt}++;
return -1;
}
}
#
# If there is a partial and we are doing a full, we do an incremental
# against the partial and a full against the rest.  This subroutine
# is how we tell File::RsyncP which files to ignore attributes on
# (ie: against the partial dump we do consider the attributes, but
# otherwise we ignore attributes).
#
sub ignoreAttrOnFile
{
    my($fio, $f) = @_;

    # No partial dump: File::RsyncP's default handling applies.
    return if ( !defined($fio->{partialNum}) );
    my($attr, $isPartial) = $fio->attribGetWhere($f);
    $fio->log("$f->{name}: just checking attributes from partial")
                    if ( $isPartial && $fio->{logLevel} >= 5 );
    # Ignore attributes (force transfer checks) unless the file came
    # from the partial dump.
    return !$isPartial;
}
+
#
# This is called by File::RsyncP when a file is skipped because the
# attributes match.  When a partial dump is in play the skipped file
# lives in the partial, so we still need to link it into the new dump.
#
sub attrSkippedFile
{
    my($fio, $f, $attr) = @_;

    #
    # Unless this is a partial, this is normal so ignore it.
    #
    return if ( !defined($fio->{partialNum}) );

    $fio->log("$f->{name}: skipped in partial; adding link")
                    if ( $fio->{logLevel} >= 5 );
    #
    # Set up the receive state as if an exact-match delta just finished,
    # then let fileDeltaRxDone link the partial's file into place.
    #
    $fio->{rxLocalAttr} = $attr;
    $fio->{rxFile}      = $f;
    $fio->{rxSize}      = $attr->{size};
    delete($fio->{rxInFd});
    delete($fio->{rxOutFd});
    delete($fio->{rxDigest});
    delete($fio->{rxInData});
    return $fio->fileDeltaRxDone();
}
#
. " ($fio->{rxFile}{size} vs $rxSize)")
if ( $fio->{logLevel} >= 5 );
}
+ #
+ # If compression was off and now on, or on and now off, then
+ # don't do an exact match.
+ #
+ if ( defined($fio->{rxLocalAttr})
+ && !$fio->{rxLocalAttr}{compress} != !$fio->{xfer}{compress} ) {
+ $fio->{rxMatchBlk} = undef; # compression changed, so no file match
+ $fio->log("$fio->{rxFile}{name}: compression changed, so no match"
+ . " ($fio->{rxLocalAttr}{compress} vs $fio->{xfer}{compress})")
+ if ( $fio->{logLevel} >= 4 );
+ }
delete($fio->{rxInFd});
delete($fio->{rxOutFd});
delete($fio->{rxDigest});
$rxOutFile, $fio->{rxFile}{size},
$fio->{xfer}{compress});
$fio->log("$fio->{rxFile}{name}: opening output file $rxOutFile")
- if ( $fio->{logLevel} >= 10 );
+ if ( $fio->{logLevel} >= 9 );
$fio->{rxOutFile} = $rxOutFile;
$fio->{rxOutFileRel} = $rxOutFileRel;
$fio->{rxDigest} = File::RsyncP::Digest->new;
# Need to copy the sequence of blocks that matched. If the file
# is compressed we need to make a copy of the uncompressed file,
# since the compressed file is not seekable. Future optimizations
- # would be to keep the uncompressed file in memory (eg, up to say
- # 10MB), only create an uncompressed copy if the matching
+ # could include only creating an uncompressed copy if the matching
# blocks were not monotonic, and to only do this if there are
# matching blocks (eg, maybe the entire file is new).
#
0,
$attr->{compress})) ) {
$fio->log("Can't open $attr->{fullPath}");
+ $fio->{stats}{errorCnt}++;
return -1;
}
- if ( $attr->{size} < 10 * 1024 * 1024 ) {
+ if ( $attr->{size} < 16 * 1024 * 1024 ) {
#
- # Cache the entire old file if it is less than 10MB
+ # Cache the entire old file if it is less than 16MB
#
my $data;
$fio->{rxInData} = "";
- while ( $fh->read(\$data, 10 * 1024 * 1024) > 0 ) {
+ while ( $fh->read(\$data, 16 * 1024 * 1024) > 0 ) {
$fio->{rxInData} .= $data;
}
+ $fio->log("$attr->{fullPath}: cached all $attr->{size}"
+ . " bytes")
+ if ( $fio->{logLevel} >= 9 );
} else {
#
# Create and write a temporary output file
if ( -f "$fio->{outDirSh}RStmp" );
if ( open(F, "+>", "$fio->{outDirSh}RStmp") ) {
my $data;
+ my $byteCnt = 0;
+ binmode(F);
while ( $fh->read(\$data, 1024 * 1024) > 0 ) {
if ( syswrite(F, $data) != length($data) ) {
$fio->log(sprintf("Can't write len=%d to %s",
length($data) , "$fio->{outDirSh}RStmp"));
$fh->close;
+ $fio->{stats}{errorCnt}++;
return -1;
}
+ $byteCnt += length($data);
}
$fio->{rxInFd} = *F;
$fio->{rxInName} = "$fio->{outDirSh}RStmp";
- seek($fio->{rxInFd}, 0, 0);
+ sysseek($fio->{rxInFd}, 0, 0);
+ $fio->log("$attr->{fullPath}: copied $byteCnt,"
+ . "$attr->{size} bytes to $fio->{rxInName}")
+ if ( $fio->{logLevel} >= 9 );
} else {
$fio->log("Unable to open $fio->{outDirSh}RStmp");
$fh->close;
+ $fio->{stats}{errorCnt}++;
return -1;
}
}
$fh->close;
} else {
if ( open(F, "<", $attr->{fullPath}) ) {
+ binmode(F);
$fio->{rxInFd} = *F;
$fio->{rxInName} = $attr->{fullPath};
} else {
$fio->log("Unable to open $attr->{fullPath}");
+ $fio->{stats}{errorCnt}++;
return -1;
}
}
my $lastBlk = $fio->{rxMatchNext} - 1;
$fio->log("$fio->{rxFile}{name}: writing blocks $fio->{rxMatchBlk}.."
. "$lastBlk")
- if ( $fio->{logLevel} >= 10 );
+ if ( $fio->{logLevel} >= 9 );
my $seekPosn = $fio->{rxMatchBlk} * $fio->{rxBlkSize};
- if ( defined($fio->{rxInFd}) && !seek($fio->{rxInFd}, $seekPosn, 0) ) {
- $fio->log("Unable to seek $attr->{fullPath} to $seekPosn");
+ if ( defined($fio->{rxInFd})
+ && !sysseek($fio->{rxInFd}, $seekPosn, 0) ) {
+        $fio->log("Unable to seek $fio->{rxInName} to $seekPosn");
+ $fio->{stats}{errorCnt}++;
return -1;
}
my $cnt = $fio->{rxMatchNext} - $fio->{rxMatchBlk};
}
if ( defined($fio->{rxInData}) ) {
$data = substr($fio->{rxInData}, $seekPosn, $len);
+ $seekPosn += $len;
} else {
- if ( sysread($fio->{rxInFd}, $data, $len) != $len ) {
- $fio->log("Unable to read $len bytes from"
- . " $fio->{rxInName} "
- . "($i,$thisCnt,$fio->{rxBlkCnt})");
+ my $got = sysread($fio->{rxInFd}, $data, $len);
+ if ( $got != $len ) {
+ my $inFileSize = -s $fio->{rxInName};
+ $fio->log("Unable to read $len bytes from $fio->{rxInName}"
+ . " got=$got, seekPosn=$seekPosn"
+ . " ($i,$thisCnt,$fio->{rxBlkCnt},$inFileSize"
+ . ",$attr->{size})");
+ $fio->{stats}{errorCnt}++;
return -1;
}
+ $seekPosn += $len;
}
$fio->{rxOutFd}->write(\$data);
$fio->{rxDigest}->add($data);
#
my $len = length($newData);
$fio->log("$fio->{rxFile}{name}: writing $len bytes new data")
- if ( $fio->{logLevel} >= 10 );
+ if ( $fio->{logLevel} >= 9 );
$fio->{rxOutFd}->write(\$newData);
$fio->{rxDigest}->add($newData);
$fio->{rxSize} += length($newData);
#
sub fileDeltaRxDone
{
- my($fio, $md4) = @_;
+ my($fio, $md4, $phase) = @_;
my $name = $1 if ( $fio->{rxFile}{name} =~ /(.*)/ );
+ my $ret;
- if ( !defined($fio->{rxDigest}) ) {
- #
- # File was exact match, but we still need to verify the
- # MD4 checksum. Therefore open and read the file.
- #
- $fio->{rxDigest} = File::RsyncP::Digest->new;
- $fio->{rxDigest}->add(pack("V", $fio->{checksumSeed}));
- my $attr = $fio->{rxLocalAttr};
- if ( defined($attr) && defined(my $fh = BackupPC::FileZIO->open(
- $attr->{fullPath},
- 0,
- $attr->{compress})) ) {
- my $data;
- while ( $fh->read(\$data, 4 * 65536) > 0 ) {
- $fio->{rxDigest}->add($data);
- $fio->{rxSize} += length($data);
- }
- $fh->close;
- } else {
- # ERROR
- }
- $fio->log("$name got exact match")
- if ( $fio->{logLevel} >= 5 );
- }
close($fio->{rxInFd}) if ( defined($fio->{rxInFd}) );
unlink("$fio->{outDirSh}RStmp") if ( -f "$fio->{outDirSh}RStmp" );
- my $newDigest = $fio->{rxDigest}->digest;
- if ( $fio->{logLevel} >= 3 ) {
- my $md4Str = unpack("H*", $md4);
- my $newStr = unpack("H*", $newDigest);
- $fio->log("$name got digests $md4Str vs $newStr")
- }
- if ( $md4 ne $newDigest ) {
- $fio->log("$name md4 doesn't match")
- if ( $fio->{logLevel} >= 1 );
- if ( defined($fio->{rxOutFd}) ) {
- $fio->{rxOutFd}->close;
- unlink($fio->{rxOutFile});
+
+ #
+ # Check the final md4 digest
+ #
+ if ( defined($md4) ) {
+ my $newDigest;
+ if ( !defined($fio->{rxDigest}) ) {
+ #
+ # File was exact match, but we still need to verify the
+ # MD4 checksum. Compute the md4 digest (or fetch the
+ # cached one.)
+ #
+ if ( defined(my $attr = $fio->{rxLocalAttr}) ) {
+ #
+ # block size doesn't matter: we're only going to
+ # fetch the md4 file digest, not the block digests.
+ #
+ my($err, $csum, $blkSize)
+ = BackupPC::Xfer::RsyncDigest->digestStart(
+ $attr->{fullPath}, $attr->{size},
+ 0, 2048, $fio->{checksumSeed}, 1,
+ $attr->{compress}, 1);
+ if ( $err ) {
+ $fio->log("Can't open $attr->{fullPath} for MD4"
+ . " check (err=$err, $name)");
+ $fio->{stats}{errorCnt}++;
+ } else {
+ if ( $fio->{logLevel} >= 5 ) {
+ my($isCached, $invalid) = $csum->isCached;
+ $fio->log("MD4 $attr->{fullPath} cache = $isCached,"
+ . " invalid = $invalid");
+ }
+ $newDigest = $csum->digestEnd;
+ }
+ $fio->{rxSize} = $attr->{size};
+ } else {
+ #
+ # Empty file; just create an empty file digest
+ #
+ $fio->{rxDigest} = File::RsyncP::Digest->new;
+ $fio->{rxDigest}->add(pack("V", $fio->{checksumSeed}));
+ $newDigest = $fio->{rxDigest}->digest;
+ }
+ $fio->log("$name got exact match") if ( $fio->{logLevel} >= 5 );
+ } else {
+ $newDigest = $fio->{rxDigest}->digest;
+ }
+ if ( $fio->{logLevel} >= 3 ) {
+ my $md4Str = unpack("H*", $md4);
+ my $newStr = unpack("H*", $newDigest);
+ $fio->log("$name got digests $md4Str vs $newStr")
+ }
+ if ( $md4 ne $newDigest ) {
+ if ( $phase > 0 ) {
+ $fio->log("$name: fatal error: md4 doesn't match on retry;"
+ . " file removed");
+ } else {
+ $fio->log("$name: md4 doesn't match: will retry in phase 1;"
+ . " file removed");
+ }
+ $fio->{stats}{errorCnt}++;
+ if ( defined($fio->{rxOutFd}) ) {
+ $fio->{rxOutFd}->close;
+ unlink($fio->{rxOutFile});
+ }
+ delete($fio->{rxFile});
+ delete($fio->{rxOutFile});
+ return 1;
}
- return 1;
}
+
#
# One special case is an empty file: if the file size is
# zero we need to open the output file to create it.
. $fio->{bpc}->fileNameMangle($name);
if ( !link($attr->{fullPath}, $rxOutFile) ) {
$fio->log("Unable to link $attr->{fullPath} to $rxOutFile");
- return -1;
- }
- #
- # Cumulate the stats
- #
- $fio->{stats}{TotalFileCnt}++;
- $fio->{stats}{TotalFileSize} += $fio->{rxSize};
- $fio->{stats}{ExistFileCnt}++;
- $fio->{stats}{ExistFileSize} += $fio->{rxSize};
- $fio->{stats}{ExistFileCompSize} += -s $rxOutFile;
- $fio->{rxFile}{size} = $fio->{rxSize};
- return $fio->attribSet($fio->{rxFile});
+ $fio->{stats}{errorCnt}++;
+ $ret = -1;
+ } else {
+ #
+ # Cumulate the stats
+ #
+ $fio->{stats}{TotalFileCnt}++;
+ $fio->{stats}{TotalFileSize} += $fio->{rxSize};
+ $fio->{stats}{ExistFileCnt}++;
+ $fio->{stats}{ExistFileSize} += $fio->{rxSize};
+ $fio->{stats}{ExistFileCompSize} += -s $rxOutFile;
+ $fio->{rxFile}{size} = $fio->{rxSize};
+ $ret = $fio->attribSet($fio->{rxFile});
+ }
}
- }
- if ( defined($fio->{rxOutFd}) ) {
+ } else {
my $exist = $fio->processClose($fio->{rxOutFd},
$fio->{rxOutFileRel},
$fio->{rxSize}, 1);
$fio->logFileAction($exist ? "pool" : "create", $fio->{rxFile})
if ( $fio->{logLevel} >= 1 );
$fio->{rxFile}{size} = $fio->{rxSize};
- return $fio->attribSet($fio->{rxFile});
+ $ret = $fio->attribSet($fio->{rxFile});
}
delete($fio->{rxDigest});
delete($fio->{rxInData});
- return;
+ delete($fio->{rxFile});
+ delete($fio->{rxOutFile});
+ return $ret;
}
#
|| $type == BPC_FTYPE_BLOCKDEV
|| $type == BPC_FTYPE_SYMLINK ) {
my $fh = BackupPC::FileZIO->open($a->{fullPath}, 0, $a->{compress});
- my $str;
+ my($str, $rdSize);
if ( defined($fh) ) {
- if ( $fh->read(\$str, $a->{size} + 1) == $a->{size} ) {
- if ( $type == BPC_FTYPE_SYMLINK ) {
- #
- # Reconstruct symbolic link
- #
- $extraAttribs = { link => $str };
- } elsif ( $str =~ /(\d*),(\d*)/ ) {
- #
- # Reconstruct char or block special major/minor device num
- #
- $extraAttribs = { rdev => $1 * 256 + $2 };
- } else {
+ $rdSize = $fh->read(\$str, $a->{size} + 1024);
+ if ( $type == BPC_FTYPE_SYMLINK ) {
+ #
+ # Reconstruct symbolic link
+ #
+ $extraAttribs = { link => $str };
+ if ( $rdSize != $a->{size} ) {
# ERROR
- $fio->log("$name: unexpected file contents $str");
+ $fio->log("$name: can't read exactly $a->{size} bytes");
+ $fio->{stats}{errorCnt}++;
}
+ } elsif ( $str =~ /(\d*),(\d*)/ ) {
+ #
+ # Reconstruct char or block special major/minor device num
+ #
+ # Note: char/block devices have $a->{size} = 0, so we
+ # can't do an error check on $rdSize.
+ #
+ $extraAttribs = { rdev => $1 * 256 + $2 };
} else {
- # ERROR
- $fio->log("$name: can't read exactly $a->{size} bytes");
+ $fio->log("$name: unexpected special file contents $str");
+ $fio->{stats}{errorCnt}++;
}
$fh->close;
} else {
# ERROR
$fio->log("$name: can't open");
+ $fio->{stats}{errorCnt}++;
}
}
my $f = {
{
my($fio, $isChild) = @_;
+ #
+ # If we are aborting early, remove the last file since
+ # it was not complete
+ #
+ if ( $isChild && defined($fio->{rxFile}) ) {
+ unlink("$fio->{outDirSh}RStmp") if ( -f "$fio->{outDirSh}RStmp" );
+ if ( defined($fio->{rxFile}) ) {
+ unlink($fio->{rxOutFile});
+ $fio->log("finish: removing in-process file $fio->{rxFile}{name}");
+ }
+ }
+
#
# Flush the attributes if this is the child
#
- $fio->attribWrite(undef);
+ $fio->attribWrite(undef) if ( $isChild );
}
#sub is_tainted