#
#========================================================================
#
-# Version 2.1.0_CVS, released 8 Feb 2004.
+# Version 2.1.0, released 20 Jun 2004.
#
# See http://backuppc.sourceforge.net.
#
use BackupPC::Xfer::Rsync;
use Socket;
use File::Path;
+use File::Find;
use Getopt::Std;
###########################################################################
my %Conf = $bpc->Conf();
my $NeedPostCmd;
my $Hosts;
+my $SigName;
+my $Abort;
$bpc->ChildInit();
# For archive hosts we don't bother any further
#
if ($Conf{XferMethod} eq "archive" ) {
+ print(STDERR "Exiting because the XferMethod is set to archive\n")
+ if ( $opts{v} );
exit(0);
}
my $lastIncr = 0;
my $partialIdx = -1;
my $partialNum;
+my $lastPartial = 0;
if ( $Conf{FullPeriod} == -1 && !$opts{f} && !$opts{i}
|| $Conf{FullPeriod} == -2 ) {
+ print(STDERR "Exiting because backups are disabled with"
+ . " \$Conf{FullPeriod} = $Conf{FullPeriod}\n") if ( $opts{v} );
+ #
+ # Tell BackupPC to ignore old failed backups on hosts that
+ # have backups disabled.
+ #
+ print("backups disabled\n")
+ if ( defined($StatusHost{errorTime})
+ && $StatusHost{reason} ne "Reason_backup_done"
+ && time - $StatusHost{errorTime} > 4 * 24 * 3600 );
NothingToDo($needLink);
}
my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
my($currHours) = $hour + $min / 60 + $sec / 3600;
my $blackout;
+
#
- # Allow blackout to span midnight (specified by BlackoutHourBegin
- # being greater than BlackoutHourEnd)
+ # Handle backward compatibility with original separate scalar
+ # parameters.
#
- if ( $Conf{BlackoutHourBegin} > $Conf{BlackoutHourEnd} ) {
- $blackout = $Conf{BlackoutHourBegin} <= $currHours
- || $currHours <= $Conf{BlackoutHourEnd};
- if ( $currHours <= $Conf{BlackoutHourEnd} ) {
- #
- # This is after midnight, so decrement the weekday for the
- # weekday check (eg: Monday 11pm-1am means Monday 2300 to
- # Tuesday 0100, not Monday 2300-2400 plus Monday 0000-0100).
- #
- $wday--;
- $wday += 7 if ( $wday < 0 );
- }
- } else {
- $blackout = $Conf{BlackoutHourBegin} <= $currHours
- && $currHours <= $Conf{BlackoutHourEnd};
+ if ( defined($Conf{BlackoutHourBegin}) ) {
+ push(@{$Conf{BlackoutPeriods}},
+ {
+ hourBegin => $Conf{BlackoutHourBegin},
+ hourEnd => $Conf{BlackoutHourEnd},
+ weekDays => $Conf{BlackoutWeekDays},
+ }
+ );
}
- if ( $blackout && grep($_ == $wday, @{$Conf{BlackoutWeekDays}}) ) {
-# print(LOG $bpc->timeStamp, "skipping because of blackout"
-# . " (alive $StatusHost{aliveCnt} times)\n");
- print(STDERR "Skipping $client because of blackout\n")
- if ( $opts{v} );
- NothingToDo($needLink);
+ foreach my $p ( @{$Conf{BlackoutPeriods}} ) {
+ #
+ # Allow blackout to span midnight (specified by BlackoutHourBegin
+ # being greater than BlackoutHourEnd)
+ #
+ next if ( ref($p->{weekDays}) ne "ARRAY"
+ || !defined($p->{hourBegin})
+ || !defined($p->{hourEnd})
+ );
+ if ( $p->{hourBegin} > $p->{hourEnd} ) {
+ $blackout = $p->{hourBegin} <= $currHours
+ || $currHours <= $p->{hourEnd};
+ if ( $currHours <= $p->{hourEnd} ) {
+ #
+ # This is after midnight, so decrement the weekday for the
+ # weekday check (eg: Monday 11pm-1am means Monday 2300 to
+ # Tuesday 0100, not Monday 2300-2400 plus Monday 0000-0100).
+ #
+ $wday--;
+ $wday += 7 if ( $wday < 0 );
+ }
+ } else {
+ $blackout = $p->{hourBegin} <= $currHours
+ && $currHours <= $p->{hourEnd};
+ }
+ if ( $blackout && grep($_ == $wday, @{$p->{weekDays}}) ) {
+# print(LOG $bpc->timeStamp, "skipping because of blackout"
+# . " (alive $StatusHost{aliveCnt} times)\n");
+ print(STDERR "Skipping $client because of blackout\n")
+ if ( $opts{v} );
+ NothingToDo($needLink);
+ }
}
}
if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) {
- printf(LOG "%sskipping because of user requested delay (%.1f hours left)",
+ printf(LOG "%sskipping because of user requested delay (%.1f hours left)\n",
$bpc->timeStamp, ($StatusHost{backoffTime} - time) / 3600);
NothingToDo($needLink);
}
$lastIncr = $Backups[$i]{startTime}
if ( $lastIncr < $Backups[$i]{startTime} );
} elsif ( $Backups[$i]{type} eq "partial" ) {
- $partialIdx = $i;
- $partialNum = $Backups[$i]{num};
+ $partialIdx = $i;
+ $lastPartial = $Backups[$i]{startTime};
+ $partialNum = $Backups[$i]{num};
}
}
print("dump failed: unable to open/create $Dir/XferLOG$fileExt\n");
exit(1);
}
+
+#
+# Ignore the partial dump in the case of an incremental
+# or when the partial is too old. A partial is a partial full.
+#
+if ( $type ne "full" || time - $lastPartial > $Conf{PartialAgeMax} * 24*3600 ) {
+ $partialNum = undef;
+ $partialIdx = -1;
+}
+
+#
+# If this is a partial, copy the old XferLOG file
+#
+if ( $partialNum ) {
+ my($compress, $fileName);
+ if ( -f "$Dir/XferLOG.$partialNum.z" ) {
+ $fileName = "$Dir/XferLOG.$partialNum.z";
+ $compress = 1;
+ } elsif ( -f "$Dir/XferLOG.$partialNum" ) {
+ $fileName = "$Dir/XferLOG.$partialNum";
+ $compress = 0;
+ }
+ if ( my $oldLOG = BackupPC::FileZIO->open($fileName, 0, $compress) ) {
+ my $data;
+ while ( $oldLOG->read(\$data, 65536) > 0 ) {
+ $XferLOG->write(\$data);
+ }
+ $oldLOG->close;
+ }
+}
+
$XferLOG->writeTeeStderr(1) if ( $opts{v} );
-unlink("$Dir/NewFileList");
-my $startTime = time();
+unlink("$Dir/NewFileList") if ( -f "$Dir/NewFileList" );
+my $startTime = time();
my $tarErrs = 0;
my $nFilesExist = 0;
my $sizeExist = 0;
backups => \@Backups,
compress => $Conf{CompressLevel},
XferMethod => $Conf{XferMethod},
+ logLevel => $Conf{XferLogLevel},
pidHandler => \&pidHandler,
partialNum => $partialNum,
});
# kill off the tar process, first nicely then forcefully
#
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), $tarPid);
}
if ( @xferPid ) {
- kill(2, @xferPid);
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, @xferPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
exit(1);
while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
$_ = $1;
$tarOut = $2;
- $XferLOG->write(\"tarExtract: $_\n");
+ if ( /^ / ) {
+ $XferLOG->write(\"$_\n");
+ } else {
+ $XferLOG->write(\"tarExtract: $_\n");
+ }
+ if ( /^BackupPC_tarExtact aborting \((.*)\)/ ) {
+ $stat{hostError} = $1;
+ }
if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) {
$tarErrs += $1;
$nFilesExist += $2;
# kill off the tranfer program, first nicely then forcefully
#
if ( @xferPid ) {
- kill(2, @xferPid);
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, @xferPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
#
# kill off the tar process, first nicely then forcefully
#
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), $tarPid);
}
#
# don't do any more shares on this host
$stat{xferOK} = 0;
}
+$stat{xferOK} = 0 if ( $Abort );
+
#
# Do one last check to make sure it is still the machine we expect.
#
}
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-$XferLOG->close();
close($newFilesFH) if ( defined($newFilesFH) );
my $endTime = time();
# If the dump failed, clean up
#
if ( !$stat{xferOK} ) {
- #
- # wait a short while and see if the system is still alive
- #
$stat{hostError} = $stat{lastOutputLine} if ( $stat{hostError} eq "" );
if ( $stat{hostError} ) {
print(LOG $bpc->timeStamp,
"Got fatal error during xfer ($stat{hostError})\n");
+ $XferLOG->write(\"Got fatal error during xfer ($stat{hostError})\n");
}
- sleep(10);
- if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
- $stat{hostAbort} = 1;
- }
- if ( $stat{hostAbort} ) {
- $stat{hostError} = "lost network connection during backup";
+ if ( !$Abort ) {
+ #
+ # wait a short while and see if the system is still alive
+ #
+ sleep(5);
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ $stat{hostAbort} = 1;
+ }
+ if ( $stat{hostAbort} ) {
+ $stat{hostError} = "lost network connection during backup";
+ }
+ print(LOG $bpc->timeStamp, "Backup aborted ($stat{hostError})\n");
+ $XferLOG->write(\"Backup aborted ($stat{hostError})\n");
+ } else {
+ $XferLOG->write(\"Backup aborted by user signal\n");
}
- print(LOG $bpc->timeStamp, "Dump aborted ($stat{hostError})\n");
#
- # This exits.
+ # Close the log file and call BackupFailCleanup, which exits.
#
BackupFailCleanup();
}
sub catch_signal
{
- my $signame = shift;
+ my $sigName = shift;
#
- # Children quit quietly on ALRM
+ # The first time we receive a signal we try to gracefully
+ # abort the backup. This allows us to keep a partial dump
+ # with the in-progress file deleted and attribute caches
+ # flushed to disk etc.
#
- exit(1) if ( $Pid != $$ && $signame eq "ALRM" );
+ if ( !length($SigName) ) {
+ my $reason;
+ if ( $sigName eq "INT" ) {
+ $reason = "aborted by user (signal=$sigName)";
+ } else {
+ $reason = "aborted by signal=$sigName";
+ }
+ if ( $Pid == $$ ) {
+ #
+ # Parent logs a message
+ #
+ print(LOG $bpc->timeStamp,
+                  "Aborting backup after signal $sigName\n");
+
+ #
+ # Tell xfer to abort
+ #
+ $xfer->abort($reason);
+
+ #
+ # Send ALRMs to BackupPC_tarExtract if we are using it
+ #
+ if ( $tarPid > 0 ) {
+            kill($bpc->sigName2num("ALRM"), $tarPid);
+ }
+
+ #
+ # Schedule a 20 second timer in case the clean
+ # abort doesn't complete
+ #
+ alarm(20);
+ } else {
+ #
+ # Children ignore anything other than ALRM and INT
+ #
+ if ( $sigName ne "ALRM" && $sigName ne "INT" ) {
+ return;
+ }
+
+ #
+ # The child also tells xfer to abort
+ #
+ $xfer->abort($reason);
+
+ #
+ # Schedule a 15 second timer in case the clean
+ # abort doesn't complete
+ #
+ alarm(15);
+ }
+ $SigName = $sigName;
+ $Abort = 1;
+ return;
+ }
+
+ #
+ # This is a second signal: time to clean up.
+ #
+ if ( $Pid != $$ && ($sigName eq "ALRM" || $sigName eq "INT") ) {
+ #
+ # Children quit quietly on ALRM or INT
+ #
+ exit(1)
+ }
#
# Ignore other signals in children
#
return if ( $Pid != $$ );
- print(LOG $bpc->timeStamp, "cleaning up after signal $signame\n");
- $SIG{$signame} = 'IGNORE';
+ $SIG{$sigName} = 'IGNORE';
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
- $XferLOG->write(\"exiting after signal $signame\n");
- $XferLOG->close();
+ $XferLOG->write(\"exiting after signal $sigName\n");
if ( @xferPid ) {
- kill(2, @xferPid);
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, @xferPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), $tarPid);
}
- if ( $signame eq "INT" ) {
- $stat{hostError} = "aborted by user (signal=$signame)";
+ if ( $sigName eq "INT" ) {
+ $stat{hostError} = "aborted by user (signal=$sigName)";
} else {
- $stat{hostError} = "received signal=$signame";
+ $stat{hostError} = "received signal=$sigName";
}
BackupFailCleanup();
}
+sub CheckForNewFiles
+{
+ if ( -f _ && $File::Find::name !~ /\/fattrib$/ ) {
+ $nFilesTotal++;
+ } elsif ( -d _ ) {
+ #
+ # No need to check entire tree
+ #
+ $File::Find::prune = 1 if ( $nFilesTotal );
+ }
+}
+
sub BackupFailCleanup
{
my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+ my $keepPartial = 0;
+
+ #
+ # We keep this backup if it is a full and we actually backed
+ # up some files.
+ #
+ if ( $type eq "full" ) {
+ if ( $nFilesTotal == 0 && $xfer->getStats->{fileCnt} == 0 ) {
+ #
+ # Xfer didn't report any files, but check in the new
+ # directory just in case.
+ #
+ find(\&CheckForNewFiles, "$Dir/new");
+ $keepPartial = 1 if ( $nFilesTotal );
+ } else {
+ #
+ # Xfer reported some files
+ #
+ $keepPartial = 1;
+ }
+ }
- if ( $type ne "full"
- || ($nFilesTotal == 0 && $xfer->getStats->{fileCnt} == 0) ) {
+ #
+ # Don't keep partials if they are disabled
+ #
+ $keepPartial = 0 if ( $Conf{PartialAgeMax} < 0 );
+
+ if ( !$keepPartial ) {
#
# No point in saving this dump; get rid of everything.
#
- unlink("$Dir/timeStamp.level0");
- unlink("$Dir/SmbLOG.bad");
- unlink("$Dir/SmbLOG.bad$fileExt");
- unlink("$Dir/XferLOG.bad");
- unlink("$Dir/XferLOG.bad$fileExt");
- unlink("$Dir/NewFileList");
+ $XferLOG->close();
+ unlink("$Dir/timeStamp.level0") if ( -f "$Dir/timeStamp.level0" );
+ unlink("$Dir/SmbLOG.bad") if ( -f "$Dir/SmbLOG.bad" );
+ unlink("$Dir/SmbLOG.bad$fileExt") if ( -f "$Dir/SmbLOG.bad$fileExt" );
+ unlink("$Dir/XferLOG.bad") if ( -f "$Dir/XferLOG.bad" );
+ unlink("$Dir/XferLOG.bad$fileExt") if ( -f "$Dir/XferLOG.bad$fileExt" );
+ unlink("$Dir/NewFileList") if ( -f "$Dir/NewFileList" );
rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
$bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
print("dump failed: $stat{hostError}\n");
+    print("link $clientURI\n") if ( $needLink );
exit(1);
}
my(@Backups) = $bpc->BackupInfoRead($client);
my($cntFull, $cntIncr, $firstFull, $firstIncr, $oldestIncr, $oldestFull);
+ if ( $Conf{FullKeepCnt} <= 0 ) {
+ print(LOG $bpc->timeStamp,
+ "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n");
+ print(STDERR
+ "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n")
+ if ( $opts{v} );
+ return;
+ }
while ( 1 ) {
$cntFull = $cntIncr = 0;
$oldestIncr = $oldestFull = 0;
print(LOG $bpc->timeStamp,
"removing incr backup $Backups[$firstIncr]{num}\n");
BackupRemove($client, \@Backups, $firstIncr);
- } elsif ( ($cntFull > $Conf{FullKeepCnt}
- || ($cntFull > $Conf{FullKeepCntMin}
- && $oldestFull > $Conf{FullAgeMax}))
+ next;
+ }
+
+ #
+ # Delete any old full backups, according to $Conf{FullKeepCntMin}
+ # and $Conf{FullAgeMax}.
+ #
+ # First make sure that $Conf{FullAgeMax} is at least bigger
+ # than $Conf{FullPeriod} * $Conf{FullKeepCnt}, including
+ # the exponential array case.
+ #
+ my $fullKeepCnt = $Conf{FullKeepCnt};
+ $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+ my $fullAgeMax;
+ my $fullPeriod = int(0.5 + $Conf{FullPeriod});
+ for ( my $i = 0 ; $i < @$fullKeepCnt ; $i++ ) {
+ $fullAgeMax += $fullKeepCnt->[$i] * $fullPeriod;
+ $fullPeriod *= 2;
+ }
+ $fullAgeMax += $fullPeriod; # add some buffer
+
+ if ( $cntFull > $Conf{FullKeepCntMin}
+ && $oldestFull > $Conf{FullAgeMax}
+ && $oldestFull > $fullAgeMax
+ && $Conf{FullKeepCntMin} > 0
+ && $Conf{FullAgeMax} > 0
&& (@Backups <= $firstFull + 1
|| !$Backups[$firstFull + 1]{noFill}) ) {
#
# filled.)
#
print(LOG $bpc->timeStamp,
- "removing full backup $Backups[$firstFull]{num}\n");
+ "removing old full backup $Backups[$firstFull]{num}\n");
BackupRemove($client, \@Backups, $firstFull);
- } else {
- last;
- }
+ next;
+ }
+
+ #
+    # Do new-style full backup expiry, which includes the case
+ # where $Conf{FullKeepCnt} is an array.
+ #
+ last if ( !BackupFullExpire($client, \@Backups) );
}
$bpc->BackupInfoWrite($client, @Backups);
}
+#
+# Handle full backup expiry, using exponential periods.
+#
+sub BackupFullExpire
+{
+ my($client, $Backups) = @_;
+ my $fullCnt = 0;
+ my $fullPeriod = $Conf{FullPeriod};
+ my $origFullPeriod = $fullPeriod;
+ my $fullKeepCnt = $Conf{FullKeepCnt};
+ my $fullKeepIdx = 0;
+ my(@delete, @fullList);
+
+ #
+ # Don't delete anything if $Conf{FullPeriod} or $Conf{FullKeepCnt} are
+ # not defined - possibly a corrupted config.pl file.
+ #
+ return if ( !defined($Conf{FullPeriod}) || !defined($Conf{FullKeepCnt}) );
+
+ #
+ # If regular backups are still disabled with $Conf{FullPeriod} < 0,
+ # we still expire backups based on a typical FullPeriod value - weekly.
+ #
+ $fullPeriod = 7 if ( $fullPeriod <= 0 );
+
+ $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+
+ for ( my $i = 0 ; $i < @$Backups ; $i++ ) {
+ next if ( $Backups->[$i]{type} ne "full" );
+ push(@fullList, $i);
+ }
+ for ( my $k = @fullList - 1 ; $k >= 0 ; $k-- ) {
+ my $i = $fullList[$k];
+        my $prevFull = $k > 0 ? $fullList[$k-1] : undef;
+ #
+ # Don't delete any full that is followed by an unfilled backup,
+ # since it is needed for restore.
+ #
+ my $noDelete = $i + 1 < @$Backups ? $Backups->[$i+1]{noFill} : 0;
+
+ if ( !$noDelete &&
+ ($fullKeepIdx >= @$fullKeepCnt
+ || $k > 0
+ && $fullKeepIdx > 0
+ && $Backups->[$i]{startTime} - $Backups->[$prevFull]{startTime}
+ < ($fullPeriod - $origFullPeriod / 2) * 24 * 3600
+ )
+ ) {
+ #
+ # Delete the full backup
+ #
+ #printf("Deleting backup $i ($prevFull)\n");
+ push(@delete, $i);
+ } else {
+ $fullCnt++;
+ while ( $fullKeepIdx < @$fullKeepCnt
+ && $fullCnt >= $fullKeepCnt->[$fullKeepIdx] ) {
+ $fullKeepIdx++;
+ $fullCnt = 0;
+ $fullPeriod = 2 * $fullPeriod;
+ }
+ }
+ }
+ #
+ # Now actually delete the backups
+ #
+ for ( my $i = @delete - 1 ; $i >= 0 ; $i-- ) {
+ print(LOG $bpc->timeStamp,
+ "removing full backup $Backups->[$delete[$i]]{num}\n");
+ BackupRemove($client, $Backups, $delete[$i]);
+ }
+ return @delete;
+}
+
#
# Removes any partial backups
#
$num = $Backups[$i]{num} if ( $num < $Backups[$i]{num} );
}
$num++;
- $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$num")
- if ( -d "$Dir/$num" );
+ $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$num") if ( -d "$Dir/$num" );
if ( !rename("$Dir/new", "$Dir/$num") ) {
- print(LOG $bpc->timeStamp,
- "Rename $Dir/new -> $Dir/$num failed\n");
+ print(LOG $bpc->timeStamp, "Rename $Dir/new -> $Dir/$num failed\n");
$stat{xferOK} = 0;
}
- rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$num$fileExt");
- rename("$Dir/NewFileList", "$Dir/NewFileList.$num");
+ $needLink = 1 if ( -f "$Dir/NewFileList" );
#
# Add the new backup information to the backup file
$Backups[$i]{mangle} = 1; # name mangling always on for v1.04+
$bpc->BackupInfoWrite($client, @Backups);
- unlink("$Dir/timeStamp.level0");
+ unlink("$Dir/timeStamp.level0") if ( -f "$Dir/timeStamp.level0" );
+ foreach my $ext ( qw(bad bad.z) ) {
+ next if ( !-f "$Dir/XferLOG.$ext" );
+ unlink("$Dir/XferLOG.$ext.old") if ( -f "$Dir/XferLOG.$ext" );
+ rename("$Dir/XferLOG.$ext", "$Dir/XferLOG.$ext.old");
+ }
#
# Now remove the bad files, replacing them if possible with links to
next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
if ( !link("$Dir/$Backups[$j]{num}/$file",
"$Dir/$num/$shareM/$fileM") ) {
- print(LOG $bpc->timeStamp,
- "Unable to link $num/$shareM/$fileM to"
- . " $Backups[$j]{num}/$file\n");
+                my $str = "Unable to link $num/$f->{share}/$f->{file} to"
+ . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+ $XferLOG->write(\$str);
} else {
- print(LOG $bpc->timeStamp,
- "Bad file $num/$shareM/$fileM replaced by link to"
- . " $Backups[$j]{num}/$file\n");
+ my $str = "Bad file $num/$f->{share}/$f->{file} replaced"
+ . " by link to"
+ . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+ $XferLOG->write(\$str);
}
last;
}
if ( $j < 0 ) {
- print(LOG $bpc->timeStamp,
- "Removed bad file $num/$shareM/$fileM (no older"
- . " copy to link to)\n");
+ my $str = "Removed bad file $num/$f->{share}/$f->{file}"
+ . " (no older copy to link to)\n";
+ $XferLOG->write(\$str);
}
}
+ $XferLOG->close();
+ rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$num$fileExt");
+ rename("$Dir/NewFileList", "$Dir/NewFileList.$num");
+
return $num;
}
#
sub UserCommandRun
{
- my($type) = @_;
+ my($cmdType) = @_;
- return if ( !defined($Conf{$type}) );
+ return if ( !defined($Conf{$cmdType}) );
my $vars = {
xfer => $xfer,
client => $client,
xferOK => $stat{xferOK} || 0,
hostError => $stat{hostError},
type => $type,
+ cmdType => $cmdType,
};
- my $cmd = $bpc->cmdVarSubstitute($Conf{$type}, $vars);
- $XferLOG->write(\"Executing $type: @$cmd\n");
+ my $cmd = $bpc->cmdVarSubstitute($Conf{$cmdType}, $vars);
+ $XferLOG->write(\"Executing $cmdType: @$cmd\n");
#
# Run the user's command, dumping the stdout/stderr into the
# Xfer log file. Also supply the optional $vars and %Conf in