-#!/bin/perl -T
+#!/bin/perl
#============================================================= -*-perl-*-
#
# BackupPC_dump: Dump a single client.
#
# DESCRIPTION
#
-# Usage: BackupPC_dump [-i] [-f] [-d] [-e] <client>
+# Usage: BackupPC_dump [-i] [-f] [-d] [-e] [-v] <client>
#
# Flags:
#
# dhcp hosts that are no longer on the network will not expire
# old backups.
#
+# -v verbose. for manual usage: prints failure reasons in more detail.
+#
# BackupPC_dump is run periodically by BackupPC to backup $client.
# The file $TopDir/pc/$client/backups is read to decide whether a
# full or incremental backup needs to be run. If no backup is
# Craig Barratt <cbarratt@users.sourceforge.net>
#
# COPYRIGHT
-# Copyright (C) 2001 Craig Barratt
+# Copyright (C) 2001-2003 Craig Barratt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#
#========================================================================
#
-# Version 2.0.0_CVS, released 3 Feb 2003.
+# Version 2.1.0beta2, released 23 May 2004.
#
# See http://backuppc.sourceforge.net.
#
#========================================================================
use strict;
+no utf8;
use lib "/usr/local/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::FileZIO;
use BackupPC::Xfer::Smb;
use BackupPC::Xfer::Tar;
use BackupPC::Xfer::Rsync;
+use Socket;
use File::Path;
+use File::Find;
use Getopt::Std;
###########################################################################
my %Conf = $bpc->Conf();
my $NeedPostCmd;
my $Hosts;
+my $SigName;
+my $Abort;
$bpc->ChildInit();
my %opts;
-getopts("defi", \%opts);
-if ( @ARGV != 1 ) {
- print("usage: $0 [-d] [-e] [-f] [-i] <client>\n");
+if ( !getopts("defiv", \%opts) || @ARGV != 1 ) {
+ print("usage: $0 [-d] [-e] [-f] [-i] [-v] <client>\n");
exit(1);
}
if ( $ARGV[0] !~ /^([\w\.\s-]+)$/ ) {
my($clientURI, $user);
+$bpc->verbose(1) if ( $opts{v} );
+
if ( $opts{d} ) {
#
# The client name $client is simply a DHCP address. We need to check
# host name via NetBios using nmblookup.
#
$hostIP = $client;
- exit(1) if ( $bpc->CheckHostAlive($hostIP) < 0
- || $Conf{NmbLookupCmd} eq "" );
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ print(STDERR "Exiting because CheckHostAlive($hostIP) failed\n")
+ if ( $opts{v} );
+ exit(1);
+ }
+ if ( $Conf{NmbLookupCmd} eq "" ) {
+ print(STDERR "Exiting because \$Conf{NmbLookupCmd} is empty\n")
+ if ( $opts{v} );
+ exit(1);
+ }
($client, $user) = $bpc->NetBiosInfoGet($hostIP);
- exit(1) if ( $host !~ /^([\w\.\s-]+)$/ );
+ if ( $client !~ /^([\w\.\s-]+)$/ ) {
+ print(STDERR "Exiting because NetBiosInfoGet($hostIP) returned"
+ . " '$client', an invalid host name\n") if ( $opts{v} );
+ exit(1)
+ }
$Hosts = $bpc->HostInfoRead($client);
- exit(1) if ( !defined($Hosts->{$client}) );
$host = $client;
} else {
$Hosts = $bpc->HostInfoRead($client);
}
+if ( !defined($Hosts->{$client}) ) {
+ print(STDERR "Exiting because host $client does not exist in the"
+ . " hosts file\n") if ( $opts{v} );
+ exit(1)
+}
my $Dir = "$TopDir/pc/$client";
-my $xferPid = -1;
+my @xferPid = ();
my $tarPid = -1;
#
$SIG{ALRM} = \&catch_signal;
$SIG{TERM} = \&catch_signal;
$SIG{PIPE} = \&catch_signal;
+$SIG{STOP} = \&catch_signal;
+$SIG{TSTP} = \&catch_signal;
+$SIG{TTIN} = \&catch_signal;
my $Pid = $$;
#
exit(0);
}
+#
+# For archive hosts we don't bother any further
+#
+if ($Conf{XferMethod} eq "archive" ) {
+ print(STDERR "Exiting because the XferMethod is set to archive\n")
+ if ( $opts{v} );
+ exit(0);
+}
+
if ( !$opts{d} ) {
#
# In the non-DHCP case, make sure the host can be looked up
# Ok, NS doesn't know about it. Maybe it is a NetBios name
# instead.
#
+ print(STDERR "Name server doesn't know about $host; trying NetBios\n")
+ if ( $opts{v} );
if ( !defined($hostIP = $bpc->NetBiosHostIPFind($host)) ) {
- print(LOG $bpc->timeStamp,
- "dump failed: Can't find host $host\n");
- print("dump failed: Can't find host $host\n");
+ print(LOG $bpc->timeStamp, "Can't find host $host via netbios\n");
+ print("host not found\n");
exit(1);
}
} else {
if ( $opts{d} ) {
if ( $StatusHost{activeJob} ) {
# oops, something is already running for this host
+ print(STDERR "Exiting because backup is already running for $client\n")
+ if ( $opts{v} );
exit(0);
}
print("DHCP $hostIP $clientURI\n");
my($needLink, @Backups, $type, $lastBkupNum, $lastFullBkupNum);
my $lastFull = 0;
my $lastIncr = 0;
+my $partialIdx = -1;
+my $partialNum;
+my $lastPartial = 0;
if ( $Conf{FullPeriod} == -1 && !$opts{f} && !$opts{i}
|| $Conf{FullPeriod} == -2 ) {
+ print(STDERR "Exiting because backups are disabled with"
+ . " \$Conf{FullPeriod} = $Conf{FullPeriod}\n") if ( $opts{v} );
+ #
+ # Tell BackupPC to ignore old failed backups on hosts that
+ # have backups disabled.
+ #
+ print("backups disabled\n")
+ if ( defined($StatusHost{errorTime})
+ && $StatusHost{reason} ne "Reason_backup_done"
+ && time - $StatusHost{errorTime} > 4 * 24 * 3600 );
NothingToDo($needLink);
}
&& $StatusHost{aliveCnt} >= $Conf{BlackoutGoodCnt} ) {
my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
my($currHours) = $hour + $min / 60 + $sec / 3600;
- if ( $Conf{BlackoutHourBegin} <= $currHours
- && $currHours <= $Conf{BlackoutHourEnd}
- && grep($_ == $wday, @{$Conf{BlackoutWeekDays}}) ) {
-# print(LOG $bpc->timeStamp, "skipping because of blackout"
-# . " (alive $StatusHost{aliveCnt} times)\n");
- NothingToDo($needLink);
+ my $blackout;
+
+ #
+ # Handle backward compatibility with original separate scalar
+ # parameters.
+ #
+ if ( defined($Conf{BlackoutHourBegin}) ) {
+ push(@{$Conf{BlackoutPeriods}},
+ {
+ hourBegin => $Conf{BlackoutHourBegin},
+ hourEnd => $Conf{BlackoutHourEnd},
+ weekDays => $Conf{BlackoutWeekDays},
+ }
+ );
+ }
+ foreach my $p ( @{$Conf{BlackoutPeriods}} ) {
+ #
+ # Allow blackout to span midnight (specified by BlackoutHourBegin
+ # being greater than BlackoutHourEnd)
+ #
+ next if ( ref($p->{weekDays}) ne "ARRAY"
+ || !defined($p->{hourBegin})
+ || !defined($p->{hourEnd})
+ );
+ if ( $p->{hourBegin} > $p->{hourEnd} ) {
+ $blackout = $p->{hourBegin} <= $currHours
+ || $currHours <= $p->{hourEnd};
+ if ( $currHours <= $p->{hourEnd} ) {
+ #
+ # This is after midnight, so decrement the weekday for the
+ # weekday check (eg: Monday 11pm-1am means Monday 2300 to
+ # Tuesday 0100, not Monday 2300-2400 plus Monday 0000-0100).
+ #
+ $wday--;
+ $wday += 7 if ( $wday < 0 );
+ }
+ } else {
+ $blackout = $p->{hourBegin} <= $currHours
+ && $currHours <= $p->{hourEnd};
+ }
+ if ( $blackout && grep($_ == $wday, @{$p->{weekDays}}) ) {
+# print(LOG $bpc->timeStamp, "skipping because of blackout"
+# . " (alive $StatusHost{aliveCnt} times)\n");
+ print(STDERR "Skipping $client because of blackout\n")
+ if ( $opts{v} );
+ NothingToDo($needLink);
+ }
}
}
$lastFull = $Backups[$i]{startTime};
$lastFullBkupNum = $Backups[$i]{num};
}
- } else {
+ } elsif ( $Backups[$i]{type} eq "incr" ) {
$lastIncr = $Backups[$i]{startTime}
if ( $lastIncr < $Backups[$i]{startTime} );
+ } elsif ( $Backups[$i]{type} eq "partial" ) {
+ $partialIdx = $i;
+ $lastPartial = $Backups[$i]{startTime};
+ $partialNum = $Backups[$i]{num};
}
}
print("dump failed: unable to open/create $Dir/XferLOG$fileExt\n");
exit(1);
}
-unlink("$Dir/NewFileList");
-my $startTime = time();
+#
+# Ignore the partial dump in the case of an incremental
+# or when the partial is too old. A partial is a partial full.
+#
+if ( $type ne "full" || time - $lastPartial > $Conf{PartialAgeMax} * 24*3600 ) {
+ $partialNum = undef;
+ $partialIdx = -1;
+}
+
+#
+# If this is a partial, copy the old XferLOG file
+#
+if ( $partialNum ) {
+ my($compress, $fileName);
+ if ( -f "$Dir/XferLOG.$partialNum.z" ) {
+ $fileName = "$Dir/XferLOG.$partialNum.z";
+ $compress = 1;
+ } elsif ( -f "$Dir/XferLOG.$partialNum" ) {
+ $fileName = "$Dir/XferLOG.$partialNum";
+ $compress = 0;
+ }
+ if ( my $oldLOG = BackupPC::FileZIO->open($fileName, 0, $compress) ) {
+ my $data;
+ while ( $oldLOG->read(\$data, 65536) > 0 ) {
+ $XferLOG->write(\$data);
+ }
+ $oldLOG->close;
+ }
+}
+
+$XferLOG->writeTeeStderr(1) if ( $opts{v} );
+unlink("$Dir/NewFileList") if ( -f "$Dir/NewFileList" );
+
+my $startTime = time();
my $tarErrs = 0;
my $nFilesExist = 0;
my $sizeExist = 0;
my $sizeExistComp = 0;
my $nFilesTotal = 0;
my $sizeTotal = 0;
-my($logMsg, %stat, $xfer, $ShareNames);
+my($logMsg, %stat, $xfer, $ShareNames, $noFilesErr);
my $newFilesFH;
if ( $Conf{XferMethod} eq "tar" ) {
# This xfer method outputs a tar format file, so we start a
# BackupPC_tarExtract to extract the data.
#
- # Create a pipe to connect the Xfer method to BackupPC_tarExtract
+ # Create a socketpair to connect the Xfer method to BackupPC_tarExtract
# WH is the write handle for writing, provided to the transport
- # program, and RH is the other end of the pipe for reading,
+ # program, and RH is the other end of the socket for reading,
# provided to BackupPC_tarExtract.
#
- pipe(RH, WH);
+ if ( socketpair(RH, WH, AF_UNIX, SOCK_STREAM, PF_UNSPEC) ) {
+ shutdown(RH, 1); # no writing to this socket
+ shutdown(WH, 0); # no reading from this socket
+ setsockopt(RH, SOL_SOCKET, SO_RCVBUF, 8 * 65536);
+ setsockopt(WH, SOL_SOCKET, SO_SNDBUF, 8 * 65536);
+ } else {
+ #
+ # Default to pipe() if socketpair() doesn't work.
+ #
+ pipe(RH, WH);
+ }
#
# fork a child for BackupPC_tarExtract. TAR is a file handle
close(WH);
last;
}
+ binmode(TAR);
if ( !$tarPid ) {
#
# This is the tar child. Close the write end of the pipe,
open(STDERR, ">&STDOUT");
close(STDIN);
open(STDIN, "<&RH");
+ alarm(0);
exec("$BinDir/BackupPC_tarExtract", $client, $shareName,
$Conf{CompressLevel});
print(LOG $bpc->timeStamp,
open(NEW_FILES, ">", "$TopDir/pc/$client/NewFileList")
|| die("can't open $TopDir/pc/$client/NewFileList");
$newFilesFH = *NEW_FILES;
+ binmode(NEW_FILES);
}
#
backups => \@Backups,
compress => $Conf{CompressLevel},
XferMethod => $Conf{XferMethod},
+ logLevel => $Conf{XferLogLevel},
+ pidHandler => \&pidHandler,
+ partialNum => $partialNum,
});
if ( !defined($logMsg = $xfer->start()) ) {
# kill off the tar process, first nicely then forcefully
#
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
+ sleep(1);
+ kill($bpc->sigName2num("KILL"), $tarPid);
+ }
+ if ( @xferPid ) {
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
exit(1);
}
- $xferPid = $xfer->xferPid;
+ @xferPid = $xfer->xferPid;
+
if ( $useTar ) {
#
# The parent must close both handles on the pipe since the children
#
close(RH);
close(WH);
- print(LOG $bpc->timeStamp, $logMsg,
- " (xferPid=$xferPid, tarPid=$tarPid)\n");
- } elsif ( $xferPid > 0 ) {
- print(LOG $bpc->timeStamp, $logMsg, " (xferPid=$xferPid)\n");
- } else {
- print(LOG $bpc->timeStamp, $logMsg, "\n");
}
- print("started $type dump, pid=$xferPid, tarPid=$tarPid,"
- . " share=$shareName\n");
+ print(LOG $bpc->timeStamp, $logMsg, "\n");
+ print("started $type dump, share=$shareName\n");
- if ( $useTar || $xferPid > 0 ) {
+ pidHandler(@xferPid);
+
+ if ( $useTar ) {
#
# Parse the output of the transfer program and BackupPC_tarExtract
# while they run. Since we might be reading from two or more children
while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
$_ = $1;
$tarOut = $2;
- $XferLOG->write(\"tarExtract: $_\n");
+ if ( /^ / ) {
+ $XferLOG->write(\"$_\n");
+ } else {
+ $XferLOG->write(\"tarExtract: $_\n");
+ }
+ if ( /^BackupPC_tarExtact aborting \((.*)\)/ ) {
+ $stat{hostError} = $1;
+ }
if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) {
$tarErrs += $1;
$nFilesExist += $2;
# the transfer.
#
if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) {
- $stat{hostError} = $errMsg;
+ $stat{hostError} = $errMsg if ( $stat{hostError} eq "" );
last SCAN;
}
}
# Merge the xfer status (need to accumulate counts)
#
my $newStat = $xfer->getStats;
+ if ( $newStat->{fileCnt} == 0 ) {
+ $noFilesErr ||= "No files dumped for share $shareName";
+ }
foreach my $k ( (keys(%stat), keys(%$newStat)) ) {
next if ( !defined($newStat->{$k}) );
if ( $k =~ /Cnt$/ ) {
#
    # kill off the transfer program, first nicely then forcefully
#
- if ( $xferPid > 0 ) {
- kill(2, $xferPid);
+ if ( @xferPid ) {
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, $xferPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
#
# kill off the tar process, first nicely then forcefully
#
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), $tarPid);
}
#
# don't do any more shares on this host
last;
}
}
-my $lastNum = -1;
+
+#
+# If this is a full, and any share had zero files then consider the dump bad
+#
+if ( $type eq "full" && $stat{hostError} eq ""
+ && length($noFilesErr) && $Conf{BackupZeroFilesIsFatal} ) {
+ $stat{hostError} = $noFilesErr;
+ $stat{xferOK} = 0;
+}
+
+$stat{xferOK} = 0 if ( $Abort );
#
# Do one last check to make sure it is still the machine we expect.
}
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-$XferLOG->close();
close($newFilesFH) if ( defined($newFilesFH) );
-if ( $stat{xferOK} ) {
- @Backups = $bpc->BackupInfoRead($client);
- for ( my $i = 0 ; $i < @Backups ; $i++ ) {
- $lastNum = $Backups[$i]{num} if ( $lastNum < $Backups[$i]{num} );
- }
- $lastNum++;
- $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$lastNum")
- if ( -d "$Dir/$lastNum" );
- if ( !rename("$Dir/new", "$Dir/$lastNum") ) {
- print(LOG $bpc->timeStamp,
- "Rename $Dir/new -> $Dir/$lastNum failed\n");
- $stat{xferOK} = 0;
- }
- rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$lastNum$fileExt");
- rename("$Dir/NewFileList", "$Dir/NewFileList.$lastNum");
-}
my $endTime = time();
#
# If the dump failed, clean up
#
if ( !$stat{xferOK} ) {
- #
- # wait a short while and see if the system is still alive
- #
$stat{hostError} = $stat{lastOutputLine} if ( $stat{hostError} eq "" );
if ( $stat{hostError} ) {
print(LOG $bpc->timeStamp,
"Got fatal error during xfer ($stat{hostError})\n");
+ $XferLOG->write(\"Got fatal error during xfer ($stat{hostError})\n");
}
- sleep(10);
- if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
- $stat{hostAbort} = 1;
- }
- if ( $stat{hostAbort} ) {
- $stat{hostError} = "lost network connection during backup";
- }
- print(LOG $bpc->timeStamp, "Dump aborted ($stat{hostError})\n");
- unlink("$Dir/timeStamp.level0");
- unlink("$Dir/SmbLOG.bad");
- unlink("$Dir/SmbLOG.bad$fileExt");
- unlink("$Dir/XferLOG.bad");
- unlink("$Dir/XferLOG.bad$fileExt");
- unlink("$Dir/NewFileList");
- rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
- $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
- print("dump failed: $stat{hostError}\n");
- print("link $clientURI\n") if ( $needLink );
- exit(1);
-}
-
-#
-# Add the new backup information to the backup file
-#
-@Backups = $bpc->BackupInfoRead($client);
-my $i = @Backups;
-$Backups[$i]{num} = $lastNum;
-$Backups[$i]{type} = $type;
-$Backups[$i]{startTime} = $startTime;
-$Backups[$i]{endTime} = $endTime;
-$Backups[$i]{size} = $sizeTotal;
-$Backups[$i]{nFiles} = $nFilesTotal;
-$Backups[$i]{xferErrs} = $stat{xferErrCnt} || 0;
-$Backups[$i]{xferBadFile} = $stat{xferBadFileCnt} || 0;
-$Backups[$i]{xferBadShare} = $stat{xferBadShareCnt} || 0;
-$Backups[$i]{nFilesExist} = $nFilesExist;
-$Backups[$i]{sizeExist} = $sizeExist;
-$Backups[$i]{sizeExistComp} = $sizeExistComp;
-$Backups[$i]{tarErrs} = $tarErrs;
-$Backups[$i]{compress} = $Conf{CompressLevel};
-$Backups[$i]{noFill} = $type eq "full" ? 0 : 1;
-$Backups[$i]{mangle} = 1; # name mangling always on for v1.04+
-$bpc->BackupInfoWrite($client, @Backups);
-
-unlink("$Dir/timeStamp.level0");
-
-#
-# Now remove the bad files, replacing them if possible with links to
-# earlier backups.
-#
-foreach my $file ( $xfer->getBadFiles ) {
- my $j;
- unlink("$Dir/$lastNum/$file");
- for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
- next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
- if ( !link("$Dir/$Backups[$j]{num}/$file", "$Dir/$lastNum/$file") ) {
- print(LOG $bpc->timeStamp,
- "Unable to link $lastNum/$file to"
- . " $Backups[$j]{num}/$file\n");
- } else {
- print(LOG $bpc->timeStamp,
- "Bad file $lastNum/$file replaced by link to"
- . " $Backups[$j]{num}/$file\n");
- }
- last;
- }
- if ( $j < 0 ) {
- print(LOG $bpc->timeStamp,
- "Removed bad file $lastNum/$file (no older"
- . " copy to link to)\n");
+ if ( !$Abort ) {
+ #
+ # wait a short while and see if the system is still alive
+ #
+ sleep(5);
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ $stat{hostAbort} = 1;
+ }
+ if ( $stat{hostAbort} ) {
+ $stat{hostError} = "lost network connection during backup";
+ }
+ print(LOG $bpc->timeStamp, "Backup aborted ($stat{hostError})\n");
+ $XferLOG->write(\"Backup aborted ($stat{hostError})\n");
+ } else {
+ $XferLOG->write(\"Backup aborted by user signal\n");
}
+
+ #
+ # Close the log file and call BackupFailCleanup, which exits.
+ #
+ BackupFailCleanup();
}
+my $newNum = BackupSave();
+
my $otherCount = $stat{xferErrCnt} - $stat{xferBadFileCnt}
- $stat{xferBadShareCnt};
print(LOG $bpc->timeStamp,
- "$type backup $lastNum complete, $stat{fileCnt} files,"
+ "$type backup $newNum complete, $stat{fileCnt} files,"
. " $stat{byteCnt} bytes,"
. " $stat{xferErrCnt} xferErrs ($stat{xferBadFileCnt} bad files,"
. " $stat{xferBadShareCnt} bad shares, $otherCount other)\n");
sub catch_signal
{
-    my $signame = shift;
-    my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+    my $sigName = shift;
+
+    #
+    # The first time we receive a signal we try to gracefully
+    # abort the backup.  This allows us to keep a partial dump
+    # with the in-progress file deleted and attribute caches
+    # flushed to disk etc.
+    #
+    if ( !length($SigName) ) {
+        my $reason;
+        if ( $sigName eq "INT" ) {
+            $reason = "aborted by user (signal=$sigName)";
+        } else {
+            $reason = "aborted by signal=$sigName";
+        }
+        if ( $Pid == $$ ) {
+            #
+            # Parent logs a message
+            #
+            print(LOG $bpc->timeStamp,
+                      "Aborting backup after signal $sigName\n");
+
+            #
+            # Tell xfer to abort
+            #
+            $xfer->abort($reason);
+
+            #
+            # Send ALRMs to BackupPC_tarExtract if we are using it
+            #
+            if ( $tarPid > 0 ) {
+                kill($bpc->sigName2num("ALRM"), $tarPid);
+            }
+
+            #
+            # Schedule a 20 second timer in case the clean
+            # abort doesn't complete
+            #
+            alarm(20);
+        } else {
+            #
+            # Children ignore anything other than ALRM and INT
+            #
+            if ( $sigName ne "ALRM" && $sigName ne "INT" ) {
+                return;
+            }
+
+            #
+            # The child also tells xfer to abort
+            #
+            $xfer->abort($reason);
+
+            #
+            # Schedule a 15 second timer in case the clean
+            # abort doesn't complete
+            #
+            alarm(15);
+        }
+        $SigName = $sigName;
+        $Abort   = 1;
+        return;
+    }
     #
-    # Ignore signals in children
+    # This is a second signal: time to clean up.
+    #
+    if ( $Pid != $$ && ($sigName eq "ALRM" || $sigName eq "INT") ) {
+        #
+        # Children quit quietly on ALRM or INT
+        #
+        exit(1);
+    }
+
+    #
+    # Ignore other signals in children
     #
     return if ( $Pid != $$ );
-    print(LOG $bpc->timeStamp, "cleaning up after signal $signame\n");
-    $SIG{$signame} = 'IGNORE';
+    $SIG{$sigName} = 'IGNORE';
     UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-    $XferLOG->write(\"exiting after signal $signame\n");
-    $XferLOG->close();
-    if ( $xferPid > 0 ) {
-        if ( kill(2, $xferPid) <= 0 ) {
-            sleep(1);
-            kill(9, $xferPid);
-        }
+    $XferLOG->write(\"exiting after signal $sigName\n");
+    if ( @xferPid ) {
+        kill($bpc->sigName2num("INT"), @xferPid);
+        sleep(1);
+        kill($bpc->sigName2num("KILL"), @xferPid);
     }
     if ( $tarPid > 0 ) {
-        if ( kill(2, $tarPid) <= 0 ) {
-            sleep(1);
-            kill(9, $tarPid);
-        }
+        kill($bpc->sigName2num("INT"), $tarPid);
+        sleep(1);
+        kill($bpc->sigName2num("KILL"), $tarPid);
     }
-    unlink("$Dir/timeStamp.level0");
-    unlink("$Dir/NewFileList");
-    unlink("$Dir/XferLOG.bad");
-    unlink("$Dir/XferLOG.bad$fileExt");
-    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
-    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
-    if ( $signame eq "INT" ) {
-        print("dump failed: aborted by user (signal=$signame)\n");
+    if ( $sigName eq "INT" ) {
+        $stat{hostError} = "aborted by user (signal=$sigName)";
     } else {
-        print("dump failed: received signal=$signame\n");
+        $stat{hostError} = "received signal=$sigName";
+    }
+    BackupFailCleanup();
+}
+
+#
+# File::Find callback used on $Dir/new: bump $nFilesTotal for every
+# regular file, skipping names ending in "fattrib" (presumably the
+# mangled attribute files - confirm against the mangling scheme).
+# Once one file has been seen there is no need to walk any deeper,
+# so prune the traversal at each directory.
+#
+sub CheckForNewFiles
+{
+    if ( -d _ ) {
+        # Stop descending as soon as we know at least one file exists
+        $File::Find::prune = 1 if ( $nFilesTotal );
+    } elsif ( -f _ && $File::Find::name !~ m{/fattrib$} ) {
+        $nFilesTotal++;
+    }
+}
+
+#
+# Handle a failed dump.  A full that transferred at least one file is
+# kept as a partial backup (unless partials are disabled with
+# $Conf{PartialAgeMax} < 0); otherwise everything from this attempt is
+# removed.  Never returns: exits 1 (nothing kept) or 2 (partial saved).
+#
+sub BackupFailCleanup
+{
+    my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+    my $keepPartial = 0;
+
+    #
+    # We keep this backup if it is a full and we actually backed
+    # up some files.
+    #
+    if ( $type eq "full" ) {
+        if ( $nFilesTotal == 0 && $xfer->getStats->{fileCnt} == 0 ) {
+            #
+            # Xfer didn't report any files, but check in the new
+            # directory just in case.
+            #
+            find(\&CheckForNewFiles, "$Dir/new");
+            $keepPartial = 1 if ( $nFilesTotal );
+        } else {
+            #
+            # Xfer reported some files
+            #
+            $keepPartial = 1;
+        }
+    }
+
+    #
+    # Don't keep partials if they are disabled
+    #
+    $keepPartial = 0 if ( $Conf{PartialAgeMax} < 0 );
+
+    if ( !$keepPartial ) {
+        #
+        # No point in saving this dump; get rid of everything.
+        # Close the XferLOG once, before it is renamed, so buffered
+        # data is flushed to the file we keep as XferLOG.bad.
+        #
+        $XferLOG->close();
+        unlink("$Dir/timeStamp.level0")    if ( -f "$Dir/timeStamp.level0" );
+        unlink("$Dir/SmbLOG.bad")          if ( -f "$Dir/SmbLOG.bad" );
+        unlink("$Dir/SmbLOG.bad$fileExt")  if ( -f "$Dir/SmbLOG.bad$fileExt" );
+        unlink("$Dir/XferLOG.bad")         if ( -f "$Dir/XferLOG.bad" );
+        unlink("$Dir/XferLOG.bad$fileExt") if ( -f "$Dir/XferLOG.bad$fileExt" );
+        unlink("$Dir/NewFileList")         if ( -f "$Dir/NewFileList" );
+        rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
+        $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
+        print("dump failed: $stat{hostError}\n");
+        print("link $clientURI\n") if ( $needLink );
+        exit(1);
+    }
+    #
+    # Ok, now we should save this as a partial dump
+    #
+    $type = "partial";
+    my $newNum = BackupSave();
+    print("dump failed: $stat{hostError}\n");
 	print("link $clientURI\n") if ( $needLink );
-    exit(1);
+    print(LOG $bpc->timeStamp, "Saved partial dump $newNum\n");
+    exit(2);
 }
#
my(@Backups) = $bpc->BackupInfoRead($client);
my($cntFull, $cntIncr, $firstFull, $firstIncr, $oldestIncr, $oldestFull);
+ if ( $Conf{FullKeepCnt} <= 0 ) {
+ print(LOG $bpc->timeStamp,
+ "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n");
+ print(STDERR
+ "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n")
+ if ( $opts{v} );
+ return;
+ }
while ( 1 ) {
$cntFull = $cntIncr = 0;
$oldestIncr = $oldestFull = 0;
- for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
if ( $Backups[$i]{type} eq "full" ) {
$firstFull = $i if ( $cntFull == 0 );
$cntFull++;
#
print(LOG $bpc->timeStamp,
"removing incr backup $Backups[$firstIncr]{num}\n");
- $bpc->RmTreeDefer("$TopDir/trash",
- "$Dir/$Backups[$firstIncr]{num}");
- unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}")
- if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}" );
- unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}.z")
- if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}.z" );
- unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}")
- if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}" );
- unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}.z")
- if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}.z" );
- splice(@Backups, $firstIncr, 1);
- } elsif ( ($cntFull > $Conf{FullKeepCnt}
- || ($cntFull > $Conf{FullKeepCntMin}
- && $oldestFull > $Conf{FullAgeMax}))
+ BackupRemove($client, \@Backups, $firstIncr);
+ next;
+ }
+
+ #
+ # Delete any old full backups, according to $Conf{FullKeepCntMin}
+ # and $Conf{FullAgeMax}.
+ #
+ if ( $cntFull > $Conf{FullKeepCntMin}
+ && $oldestFull > $Conf{FullAgeMax}
&& (@Backups <= $firstFull + 1
|| !$Backups[$firstFull + 1]{noFill}) ) {
#
# filled.)
#
print(LOG $bpc->timeStamp,
- "removing full backup $Backups[$firstFull]{num}\n");
- $bpc->RmTreeDefer("$TopDir/trash",
- "$Dir/$Backups[$firstFull]{num}");
- unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}")
- if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}" );
- unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}.z")
- if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}.z" );
- unlink("$Dir/XferLOG.$Backups[$firstFull]{num}")
- if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}" );
- unlink("$Dir/XferLOG.$Backups[$firstFull]{num}.z")
- if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}.z" );
- splice(@Backups, $firstFull, 1);
- } else {
+ "removing old full backup $Backups[$firstFull]{num}\n");
+ BackupRemove($client, \@Backups, $firstFull);
+ next;
+ }
+
+ #
+    # Do new-style full backup expiry, which includes the case
+ # where $Conf{FullKeepCnt} is an array.
+ #
+ last if ( !BackupFullExpire($client, \@Backups) );
+ }
+ $bpc->BackupInfoWrite($client, @Backups);
+}
+
+#
+# Handle full backup expiry, using exponential periods.
+#
+# $Conf{FullKeepCnt} may be a scalar or an array ref; each successive
+# array entry applies at double the previous keep period.  Returns the
+# list of deleted indexes (so a false/empty result means nothing was
+# expired and the caller's retry loop can stop).
+#
+sub BackupFullExpire
+{
+    my($client, $Backups) = @_;
+    my $fullCnt = 0;
+    my $fullPeriod = $Conf{FullPeriod};
+    my $origFullPeriod = $fullPeriod;
+    my $fullKeepCnt = $Conf{FullKeepCnt};
+    my $fullKeepIdx = 0;
+    my(@delete, @fullList);
+
+    #
+    # Don't delete anything if $Conf{FullPeriod} or $Conf{FullKeepCnt} are
+    # not defined - possibly a corrupted config.pl file.
+    #
+    return if ( !defined($Conf{FullPeriod}) || !defined($Conf{FullKeepCnt}) );
+
+    #
+    # If regular backups are still disabled with $Conf{FullPeriod} < 0,
+    # we still expire backups based on a typical FullPeriod value - weekly.
+    #
+    $fullPeriod = 7 if ( $fullPeriod <= 0 );
+
+    $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+
+    for ( my $i = 0 ; $i < @$Backups ; $i++ ) {
+        next if ( $Backups->[$i]{type} ne "full" );
+        push(@fullList, $i);
+    }
+    for ( my $k = @fullList - 1 ; $k >= 0 ; $k-- ) {
+        my $i = $fullList[$k];
+        #
+        # Declare first, assign conditionally: "my $x = EXPR if COND"
+        # has undefined behavior in perl.
+        #
+        my $prevFull;
+        $prevFull = $fullList[$k-1] if ( $k > 0 );
+        #
+        # Don't delete any full that is followed by an unfilled backup,
+        # since it is needed for restore.
+        #
+        my $noDelete = $i + 1 < @$Backups ? $Backups->[$i+1]{noFill} : 0;
+
+        if ( !$noDelete &&
+              ($fullKeepIdx >= @$fullKeepCnt
+                || $k > 0
+                   && $fullKeepIdx > 0
+                   && $Backups->[$i]{startTime} - $Backups->[$prevFull]{startTime}
+                             < ($fullPeriod - $origFullPeriod / 2) * 24 * 3600
+              )
+            ) {
+            #
+            # Delete the full backup
+            #
+            push(@delete, $i);
+        } else {
+            $fullCnt++;
+            while ( $fullKeepIdx < @$fullKeepCnt
+                     && $fullCnt >= $fullKeepCnt->[$fullKeepIdx] ) {
+                $fullKeepIdx++;
+                $fullCnt = 0;
+                $fullPeriod = 2 * $fullPeriod;
+            }
+        }
+    }
+    #
+    # Now actually delete the backups
+    #
+    for ( my $i = @delete - 1 ; $i >= 0 ; $i-- ) {
+        print(LOG $bpc->timeStamp,
+                  "removing full backup $Backups->[$delete[$i]]{num}\n");
+        BackupRemove($client, $Backups, $delete[$i]);
+    }
+    return @delete;
+}
+
+#
+# Removes any partial backups
+#
+sub BackupPartialRemove
+{
+    my($client, $Backups) = @_;
+
+    #
+    # Scan from newest to oldest so the splice done inside
+    # BackupRemove() can't shift indexes we have yet to visit.
+    #
+    foreach my $idx ( reverse(0 .. $#{$Backups}) ) {
+        BackupRemove($client, $Backups, $idx)
+                if ( $Backups->[$idx]{type} eq "partial" );
+    }
+}
+
+#
+# Save the completed (or partial) dump: remove any existing partials,
+# pick the next backup number, move $Dir/new into place, record the
+# backup in the backups file, and replace bad files with links to
+# older copies where possible.  Returns the new backup number.
+#
+sub BackupSave
+{
+    my @Backups = $bpc->BackupInfoRead($client);
+    my $num = -1;
+
+    #
+    # Since we got a good backup we should remove any partial dumps
+    # (the new backup might also be a partial, but that's ok).
+    #
+    BackupPartialRemove($client, \@Backups);
+
+    #
+    # Number the new backup
+    #
+    for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+        $num = $Backups[$i]{num} if ( $num < $Backups[$i]{num} );
+    }
+    $num++;
+    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$num") if ( -d "$Dir/$num" );
+    if ( !rename("$Dir/new", "$Dir/$num") ) {
+        print(LOG $bpc->timeStamp, "Rename $Dir/new -> $Dir/$num failed\n");
+        $stat{xferOK} = 0;
+    }
+    $needLink = 1 if ( -f "$Dir/NewFileList" );
+
+    #
+    # Add the new backup information to the backup file
+    #
+    my $i = @Backups;
+    $Backups[$i]{num}           = $num;
+    $Backups[$i]{type}          = $type;
+    $Backups[$i]{startTime}     = $startTime;
+    $Backups[$i]{endTime}       = $endTime;
+    $Backups[$i]{size}          = $sizeTotal;
+    $Backups[$i]{nFiles}        = $nFilesTotal;
+    $Backups[$i]{xferErrs}      = $stat{xferErrCnt} || 0;
+    $Backups[$i]{xferBadFile}   = $stat{xferBadFileCnt} || 0;
+    $Backups[$i]{xferBadShare}  = $stat{xferBadShareCnt} || 0;
+    $Backups[$i]{nFilesExist}   = $nFilesExist;
+    $Backups[$i]{sizeExist}     = $sizeExist;
+    $Backups[$i]{sizeExistComp} = $sizeExistComp;
+    $Backups[$i]{tarErrs}       = $tarErrs;
+    $Backups[$i]{compress}      = $Conf{CompressLevel};
+    $Backups[$i]{noFill}        = $type eq "incr" ? 1 : 0;
+    $Backups[$i]{level}         = $type eq "incr" ? 1 : 0;
+    $Backups[$i]{mangle}        = 1;    # name mangling always on for v1.04+
+    $bpc->BackupInfoWrite($client, @Backups);
+
+    unlink("$Dir/timeStamp.level0") if ( -f "$Dir/timeStamp.level0" );
+    foreach my $ext ( qw(bad bad.z) ) {
+        next if ( !-f "$Dir/XferLOG.$ext" );
+        #
+        # Test the .old file we are about to replace (the source file
+        # is already known to exist from the "next" guard above).
+        #
+        unlink("$Dir/XferLOG.$ext.old") if ( -f "$Dir/XferLOG.$ext.old" );
+        rename("$Dir/XferLOG.$ext", "$Dir/XferLOG.$ext.old");
+    }
+
+    #
+    # Now remove the bad files, replacing them if possible with links to
+    # earlier backups.
+    #
+    foreach my $f ( $xfer->getBadFiles ) {
+        my $j;
+        my $shareM = $bpc->fileNameEltMangle($f->{share});
+        my $fileM  = $bpc->fileNameMangle($f->{file});
+        unlink("$Dir/$num/$shareM/$fileM");
+        for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
+            my $file;
+            if ( $Backups[$j]{mangle} ) {
+                $file = "$shareM/$fileM";
+            } else {
+                $file = "$f->{share}/$f->{file}";
+            }
+            next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
+            if ( !link("$Dir/$Backups[$j]{num}/$file",
+                       "$Dir/$num/$shareM/$fileM") ) {
+                #
+                # Build a plain string and pass a reference to write();
+                # taking a reference to the first fragment would
+                # concatenate the stringified ref into the message.
+                #
+                my $str = "Unable to link $num/$f->{share}/$f->{file} to"
+                        . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+                $XferLOG->write(\$str);
+            } else {
+                my $str = "Bad file $num/$f->{share}/$f->{file} replaced"
+                        . " by link to"
+                        . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+                $XferLOG->write(\$str);
+            }
             last;
         }
+        if ( $j < 0 ) {
+            my $str = "Removed bad file $num/$f->{share}/$f->{file}"
+                    . " (no older copy to link to)\n";
+            $XferLOG->write(\$str);
+        }
     }
-    $bpc->BackupInfoWrite($client, @Backups);
+    $XferLOG->close();
+    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$num$fileExt");
+    rename("$Dir/NewFileList", "$Dir/NewFileList.$num");
+
+    return $num;
+}
+
+#
+# Removes a specific backup
+#
+sub BackupRemove
+{
+    my($client, $Backups, $idx) = @_;
+    my($Dir) = "$TopDir/pc/$client";
+    my $num  = $Backups->[$idx]{num};
+
+    #
+    # Move the backup tree into the trash, drop the matching per-backup
+    # log files (plain and compressed), then take the entry out of the
+    # in-memory backups list.
+    #
+    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$num");
+    foreach my $log ( "SmbLOG.$num", "SmbLOG.$num.z",
+                      "XferLOG.$num", "XferLOG.$num.z" ) {
+        unlink("$Dir/$log") if ( -f "$Dir/$log" );
+    }
+    splice(@$Backups, $idx, 1);
+}
sub CorrectHostCheck
return;
}
+#
+# The Xfer method might tell us from time to time about processes
+# it forks.  We tell BackupPC about this (for status displays) and
+# keep track of the pids in case we cancel the backup
+#
+sub pidHandler
+{
+    #
+    # Remember the xfer pids, dropping any empty entries.
+    #
+    @xferPid = grep(/./, @_);
+    return if ( !@xferPid && $tarPid < 0 );
+
+    my @allPids = @xferPid;
+    push(@allPids, $tarPid) if ( $tarPid > 0 );
+    my $str = join(",", @allPids);
+    $XferLOG->write(\"Xfer PIDs are now $str\n") if ( defined($XferLOG) );
+    print("xferPids $str\n");
+}
+
#
# Run an optional pre- or post-dump command
#
LOG => *LOG,
XferLOG => $XferLOG,
stat => \%stat,
- xferOK => $stat{xferOK},
+ xferOK => $stat{xferOK} || 0,
+ hostError => $stat{hostError},
type => $type,
};
my $cmd = $bpc->cmdVarSubstitute($Conf{$type}, $vars);