-#!/bin/perl -T
+#!/bin/perl
#============================================================= -*-perl-*-
#
-# BackupPC_dump: Dump a single PC.
+# BackupPC_dump: Dump a single client.
#
# DESCRIPTION
#
-# Usage: BackupPC_dump [-i] [-f] [-d] [-e] <host>
+# Usage: BackupPC_dump [-i] [-f] [-d] [-e] [-v] <client>
#
# Flags:
#
#
# -f Do a full dump, overriding any scheduling.
#
-# -d Host is a DHCP pool address, so initially we have no
-# idea which machine this actually is. BackupPC_dump
-# determines the actual PC host name by using the NetBios
-# name.
+# -d Host is a DHCP pool address, and the client argument
+# just an IP address. We lookup the NetBios name from
+# the IP address.
#
-# -e Just do an dump expiry check for the host. Don't do anything else. # This is used periodically by BackupPC to make sure that dhcp hosts
-# have correctly expired old backups. Without this, dhcp hosts that
-# are no longer on the network will not expire old backups.
+# -e Just do a dump expiry check for the client. Don't do anything
+# else. This is used periodically by BackupPC to make sure that
+# dhcp hosts have correctly expired old backups. Without this,
+# dhcp hosts that are no longer on the network will not expire
+# old backups.
#
-# BackupPC_dump is run periodically by BackupPC to backup $host.
-# The file $TopDir/pc/$host/backups is read to decide whether a
+# -v Verbose. For manual usage: prints failure reasons in more detail.
+#
+# BackupPC_dump is run periodically by BackupPC to backup $client.
+# The file $TopDir/pc/$client/backups is read to decide whether a
# full or incremental backup needs to be run. If no backup is
-# scheduled, or a ping to $host fails, then BackupPC_dump quits.
+# scheduled, or a ping to $client fails, then BackupPC_dump quits.
#
# The backup is done using the selected XferMethod (smb, tar, rsync etc),
-# extracting the dump into $TopDir/pc/$host/new. The xfer output is
-# put into $TopDir/pc/$host/XferLOG.
+# extracting the dump into $TopDir/pc/$client/new. The xfer output is
+# put into $TopDir/pc/$client/XferLOG.
#
# If the dump succeeds (based on parsing the output of the XferMethod):
-# - $TopDir/pc/$host/new is renamed to $TopDir/pc/$host/nnn, where
+# - $TopDir/pc/$client/new is renamed to $TopDir/pc/$client/nnn, where
# nnn is the next sequential dump number.
-# - $TopDir/pc/$host/XferLOG is renamed to $TopDir/pc/$host/XferLOG.nnn.
-# - $TopDir/pc/$host/backups is updated.
+# - $TopDir/pc/$client/XferLOG is renamed to $TopDir/pc/$client/XferLOG.nnn.
+# - $TopDir/pc/$client/backups is updated.
#
# If the dump fails:
-# - $TopDir/pc/$host/new is moved to $TopDir/trash for later removal.
-# - $TopDir/pc/$host/XferLOG is renamed to $TopDir/pc/$host/XferLOG.bad
+# - $TopDir/pc/$client/new is moved to $TopDir/trash for later removal.
+# - $TopDir/pc/$client/XferLOG is renamed to $TopDir/pc/$client/XferLOG.bad
# for later viewing.
#
# BackupPC_dump communicates to BackupPC via printing to STDOUT.
# Craig Barratt <cbarratt@users.sourceforge.net>
#
# COPYRIGHT
-# Copyright (C) 2001 Craig Barratt
+# Copyright (C) 2001-2003 Craig Barratt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#
#========================================================================
#
-# Version 1.6.0_CVS, released 10 Dec 2002.
+# Version 2.1.0, released 20 Jun 2004.
#
# See http://backuppc.sourceforge.net.
#
#========================================================================
use strict;
+no utf8;
use lib "/usr/local/BackupPC/lib";
use BackupPC::Lib;
use BackupPC::FileZIO;
use BackupPC::Xfer::Smb;
use BackupPC::Xfer::Tar;
use BackupPC::Xfer::Rsync;
+use Socket;
use File::Path;
+use File::Find;
use Getopt::Std;
###########################################################################
my $TopDir = $bpc->TopDir();
my $BinDir = $bpc->BinDir();
my %Conf = $bpc->Conf();
+my $NeedPostCmd;
+my $Hosts;
+my $SigName;
+my $Abort;
$bpc->ChildInit();
my %opts;
-getopts("defi", \%opts);
-if ( @ARGV != 1 ) {
- print("usage: $0 [-d] [-e] [-f] [-i] <host>\n");
+if ( !getopts("defiv", \%opts) || @ARGV != 1 ) {
+ print("usage: $0 [-d] [-e] [-f] [-i] [-v] <client>\n");
exit(1);
}
-if ( $ARGV[0] !~ /^([\w\.-]+)$/ ) {
- print("$0: bad host name '$ARGV[0]'\n");
+if ( $ARGV[0] !~ /^([\w\.\s-]+)$/ ) {
+ print("$0: bad client name '$ARGV[0]'\n");
exit(1);
}
-my $hostIP = $1;
-my($host, $user);
+my $client = $1; # BackupPC's client name (might not be real host name)
+my $hostIP; # this is the IP address
+my $host; # this is the real host name
+
+my($clientURI, $user);
+
+$bpc->verbose(1) if ( $opts{v} );
if ( $opts{d} ) {
#
- # The host name $hostIP is simply a DHCP address. We need to check
+ # The client name $client is simply a DHCP address. We need to check
# if there is any machine at this address, and if so, get the actual
# host name via NetBios using nmblookup.
#
- exit(1) if ( $bpc->CheckHostAlive($hostIP) < 0 );
- ($host, $user) = $bpc->NetBiosInfoGet($hostIP);
- exit(1) if ( $host !~ /^([\w\.-]+)$/ );
- my $hosts = $bpc->HostInfoRead($host);
- exit(1) if ( !defined($hosts->{$host}) );
+ $hostIP = $client;
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ print(STDERR "Exiting because CheckHostAlive($hostIP) failed\n")
+ if ( $opts{v} );
+ exit(1);
+ }
+ if ( $Conf{NmbLookupCmd} eq "" ) {
+ print(STDERR "Exiting because \$Conf{NmbLookupCmd} is empty\n")
+ if ( $opts{v} );
+ exit(1);
+ }
+ ($client, $user) = $bpc->NetBiosInfoGet($hostIP);
+ if ( $client !~ /^([\w\.\s-]+)$/ ) {
+ print(STDERR "Exiting because NetBiosInfoGet($hostIP) returned"
+ . " '$client', an invalid host name\n") if ( $opts{v} );
+ exit(1)
+ }
+ $Hosts = $bpc->HostInfoRead($client);
+ $host = $client;
} else {
- $host = $hostIP;
+ $Hosts = $bpc->HostInfoRead($client);
+}
+if ( !defined($Hosts->{$client}) ) {
+ print(STDERR "Exiting because host $client does not exist in the"
+ . " hosts file\n") if ( $opts{v} );
+ exit(1)
}
-my $Dir = "$TopDir/pc/$host";
-my $xferPid = -1;
+my $Dir = "$TopDir/pc/$client";
+my @xferPid = ();
my $tarPid = -1;
#
# Re-read config file, so we can include the PC-specific config
#
-if ( defined(my $error = $bpc->ConfigRead($host)) ) {
- print("Can't read PC's config file: $error\n");
+$clientURI = $bpc->uriEsc($client);
+if ( defined(my $error = $bpc->ConfigRead($client)) ) {
+ print("dump failed: Can't read PC's config file: $error\n");
exit(1);
}
%Conf = $bpc->Conf();
$SIG{INT} = \&catch_signal;
$SIG{ALRM} = \&catch_signal;
$SIG{TERM} = \&catch_signal;
+$SIG{PIPE} = \&catch_signal;
+$SIG{STOP} = \&catch_signal;
+$SIG{TSTP} = \&catch_signal;
+$SIG{TTIN} = \&catch_signal;
+my $Pid = $$;
#
# Make sure we eventually timeout if there is no activity from
# the data transport program.
#
-alarm($Conf{SmbClientTimeout});
+alarm($Conf{ClientTimeout});
mkpath($Dir, 0, 0777) if ( !-d $Dir );
if ( !-f "$Dir/LOCK" ) {
- open(LOCK, ">$Dir/LOCK") && close(LOCK);
+ open(LOCK, ">", "$Dir/LOCK") && close(LOCK);
}
-open(LOG, ">>$Dir/LOG");
+open(LOG, ">>", "$Dir/LOG");
select(LOG); $| = 1; select(STDOUT);
-###########################################################################
-# Figure out what to do and do it
-###########################################################################
-
#
# For the -e option we just expire backups and quit
#
if ( $opts{e} ) {
- BackupExpire($host);
+ BackupExpire($client);
exit(0);
}
+#
+# For archive hosts we don't bother any further
+#
+if ($Conf{XferMethod} eq "archive" ) {
+ print(STDERR "Exiting because the XferMethod is set to archive\n")
+ if ( $opts{v} );
+ exit(0);
+}
+
+if ( !$opts{d} ) {
+ #
+ # In the non-DHCP case, make sure the host can be looked up
+ # via NS, or otherwise find the IP address via NetBios.
+ #
+ if ( $Conf{ClientNameAlias} ne "" ) {
+ $host = $Conf{ClientNameAlias};
+ } else {
+ $host = $client;
+ }
+ if ( !defined(gethostbyname($host)) ) {
+ #
+ # Ok, NS doesn't know about it. Maybe it is a NetBios name
+ # instead.
+ #
+ print(STDERR "Name server doesn't know about $host; trying NetBios\n")
+ if ( $opts{v} );
+ if ( !defined($hostIP = $bpc->NetBiosHostIPFind($host)) ) {
+ print(LOG $bpc->timeStamp, "Can't find host $host via netbios\n");
+ print("host not found\n");
+ exit(1);
+ }
+ } else {
+ $hostIP = $host;
+ }
+}
+
+###########################################################################
+# Figure out what to do and do it
+###########################################################################
+
#
# See if we should skip this host during a certain range
# of times.
print(LOG $bpc->timeStamp, "Can't connect to server ($err)\n");
exit(1);
}
-my $reply = $bpc->ServerMesg("status host($host)");
+my $reply = $bpc->ServerMesg("status host($clientURI)");
$reply = $1 if ( $reply =~ /(.*)/s );
my(%StatusHost);
eval($reply);
if ( $opts{d} ) {
if ( $StatusHost{activeJob} ) {
# oops, something is already running for this host
+ print(STDERR "Exiting because backup is already running for $client\n")
+ if ( $opts{v} );
exit(0);
}
- print("DHCP $hostIP $host\n");
+ print("DHCP $hostIP $clientURI\n");
}
my($needLink, @Backups, $type, $lastBkupNum, $lastFullBkupNum);
my $lastFull = 0;
my $lastIncr = 0;
+my $partialIdx = -1;
+my $partialNum;
+my $lastPartial = 0;
if ( $Conf{FullPeriod} == -1 && !$opts{f} && !$opts{i}
|| $Conf{FullPeriod} == -2 ) {
+ print(STDERR "Exiting because backups are disabled with"
+ . " \$Conf{FullPeriod} = $Conf{FullPeriod}\n") if ( $opts{v} );
+ #
+ # Tell BackupPC to ignore old failed backups on hosts that
+ # have backups disabled.
+ #
+ print("backups disabled\n")
+ if ( defined($StatusHost{errorTime})
+ && $StatusHost{reason} ne "Reason_backup_done"
+ && time - $StatusHost{errorTime} > 4 * 24 * 3600 );
NothingToDo($needLink);
}
&& $StatusHost{aliveCnt} >= $Conf{BlackoutGoodCnt} ) {
my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
my($currHours) = $hour + $min / 60 + $sec / 3600;
- if ( $Conf{BlackoutHourBegin} <= $currHours
- && $currHours <= $Conf{BlackoutHourEnd}
- && grep($_ == $wday, @{$Conf{BlackoutWeekDays}}) ) {
-# print(LOG $bpc->timeStamp, "skipping because of blackout"
-# . " (alive $StatusHost{aliveCnt} times)\n");
- NothingToDo($needLink);
+ my $blackout;
+
+ #
+ # Handle backward compatibility with original separate scalar
+ # parameters.
+ #
+ if ( defined($Conf{BlackoutHourBegin}) ) {
+ push(@{$Conf{BlackoutPeriods}},
+ {
+ hourBegin => $Conf{BlackoutHourBegin},
+ hourEnd => $Conf{BlackoutHourEnd},
+ weekDays => $Conf{BlackoutWeekDays},
+ }
+ );
+ }
+ foreach my $p ( @{$Conf{BlackoutPeriods}} ) {
+ #
+ # Allow blackout to span midnight (specified by BlackoutHourBegin
+ # being greater than BlackoutHourEnd)
+ #
+ next if ( ref($p->{weekDays}) ne "ARRAY"
+ || !defined($p->{hourBegin})
+ || !defined($p->{hourEnd})
+ );
+ if ( $p->{hourBegin} > $p->{hourEnd} ) {
+ $blackout = $p->{hourBegin} <= $currHours
+ || $currHours <= $p->{hourEnd};
+ if ( $currHours <= $p->{hourEnd} ) {
+ #
+ # This is after midnight, so decrement the weekday for the
+ # weekday check (eg: Monday 11pm-1am means Monday 2300 to
+ # Tuesday 0100, not Monday 2300-2400 plus Monday 0000-0100).
+ #
+ $wday--;
+ $wday += 7 if ( $wday < 0 );
+ }
+ } else {
+ $blackout = $p->{hourBegin} <= $currHours
+ && $currHours <= $p->{hourEnd};
+ }
+ if ( $blackout && grep($_ == $wday, @{$p->{weekDays}}) ) {
+# print(LOG $bpc->timeStamp, "skipping because of blackout"
+# . " (alive $StatusHost{aliveCnt} times)\n");
+ print(STDERR "Skipping $client because of blackout\n")
+ if ( $opts{v} );
+ NothingToDo($needLink);
+ }
}
}
if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) {
- printf(LOG "%sskipping because of user requested delay (%.1f hours left)",
+ printf(LOG "%sskipping because of user requested delay (%.1f hours left)\n",
$bpc->timeStamp, ($StatusHost{backoffTime} - time) / 3600);
NothingToDo($needLink);
}
#
# Now see if there are any old backups we should delete
#
-BackupExpire($host);
+BackupExpire($client);
#
# Read Backup information, and find times of the most recent full and
# incremental backups
#
-@Backups = $bpc->BackupInfoRead($host);
+@Backups = $bpc->BackupInfoRead($client);
for ( my $i = 0 ; $i < @Backups ; $i++ ) {
$needLink = 1 if ( $Backups[$i]{nFilesNew} eq ""
|| -f "$Dir/NewFileList.$Backups[$i]{num}" );
$lastFull = $Backups[$i]{startTime};
$lastFullBkupNum = $Backups[$i]{num};
}
- } else {
+ } elsif ( $Backups[$i]{type} eq "incr" ) {
$lastIncr = $Backups[$i]{startTime}
if ( $lastIncr < $Backups[$i]{startTime} );
+ } elsif ( $Backups[$i]{type} eq "partial" ) {
+ $partialIdx = $i;
+ $lastPartial = $Backups[$i]{startTime};
+ $partialNum = $Backups[$i]{num};
}
}
if ( $delay < 0 ) {
print(LOG $bpc->timeStamp, "no ping response\n");
print("no ping response\n");
- print("link $host\n") if ( $needLink );
+ print("link $clientURI\n") if ( $needLink );
exit(1);
} elsif ( $delay > $Conf{PingMaxMsec} ) {
printf(LOG "%sping too slow: %.4gmsec\n", $bpc->timeStamp, $delay);
printf("ping too slow: %.4gmsec (threshold is %gmsec)\n",
$delay, $Conf{PingMaxMsec});
- print("link $host\n") if ( $needLink );
+ print("link $clientURI\n") if ( $needLink );
exit(1);
}
print("dump failed: unable to open/create $Dir/XferLOG$fileExt\n");
exit(1);
}
-unlink("$Dir/NewFileList");
-my $startTime = time();
+#
+# Ignore the partial dump in the case of an incremental
+# or when the partial is too old. A partial is a partial full.
+#
+if ( $type ne "full" || time - $lastPartial > $Conf{PartialAgeMax} * 24*3600 ) {
+ $partialNum = undef;
+ $partialIdx = -1;
+}
+
+#
+# If this is a partial, copy the old XferLOG file
+#
+if ( $partialNum ) {
+ my($compress, $fileName);
+ if ( -f "$Dir/XferLOG.$partialNum.z" ) {
+ $fileName = "$Dir/XferLOG.$partialNum.z";
+ $compress = 1;
+ } elsif ( -f "$Dir/XferLOG.$partialNum" ) {
+ $fileName = "$Dir/XferLOG.$partialNum";
+ $compress = 0;
+ }
+ if ( my $oldLOG = BackupPC::FileZIO->open($fileName, 0, $compress) ) {
+ my $data;
+ while ( $oldLOG->read(\$data, 65536) > 0 ) {
+ $XferLOG->write(\$data);
+ }
+ $oldLOG->close;
+ }
+}
+
+$XferLOG->writeTeeStderr(1) if ( $opts{v} );
+unlink("$Dir/NewFileList") if ( -f "$Dir/NewFileList" );
+
+my $startTime = time();
my $tarErrs = 0;
my $nFilesExist = 0;
my $sizeExist = 0;
my $sizeExistComp = 0;
my $nFilesTotal = 0;
my $sizeTotal = 0;
-my($logMsg, %stat, $xfer, $ShareNames);
+my($logMsg, %stat, $xfer, $ShareNames, $noFilesErr);
my $newFilesFH;
if ( $Conf{XferMethod} eq "tar" ) {
$ShareNames = [ $ShareNames ] unless ref($ShareNames) eq "ARRAY";
+#
+# Run an optional pre-dump command
+#
+UserCommandRun("DumpPreUserCmd");
+$NeedPostCmd = 1;
+
#
# Now backup each of the shares
#
# Use rsync as the transport program.
#
if ( !defined($xfer = BackupPC::Xfer::Rsync->new($bpc)) ) {
- print(LOG $bpc->timeStamp,
- "dump failed: File::RsyncP module is not installed\n");
- print("dump failed: Rsync module is not installed\n");
+ my $errStr = BackupPC::Xfer::Rsync::errStr;
+ print(LOG $bpc->timeStamp, "dump failed: $errStr\n");
+ print("dump failed: $errStr\n");
+ UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
exit(1);
}
} else {
#
$xfer = BackupPC::Xfer::Smb->new($bpc);
}
+
my $useTar = $xfer->useTar;
if ( $useTar ) {
# This xfer method outputs a tar format file, so we start a
# BackupPC_tarExtract to extract the data.
#
- # Create a pipe to connect the Xfer method to BackupPC_tarExtract
+ # Create a socketpair to connect the Xfer method to BackupPC_tarExtract
# WH is the write handle for writing, provided to the transport
- # program, and RH is the other end of the pipe for reading,
+ # program, and RH is the other end of the socket for reading,
# provided to BackupPC_tarExtract.
#
- pipe(RH, WH);
+ if ( socketpair(RH, WH, AF_UNIX, SOCK_STREAM, PF_UNSPEC) ) {
+ shutdown(RH, 1); # no writing to this socket
+ shutdown(WH, 0); # no reading from this socket
+ setsockopt(RH, SOL_SOCKET, SO_RCVBUF, 8 * 65536);
+ setsockopt(WH, SOL_SOCKET, SO_SNDBUF, 8 * 65536);
+ } else {
+ #
+ # Default to pipe() if socketpair() doesn't work.
+ #
+ pipe(RH, WH);
+ }
#
# fork a child for BackupPC_tarExtract. TAR is a file handle
close(WH);
last;
}
+ binmode(TAR);
if ( !$tarPid ) {
#
# This is the tar child. Close the write end of the pipe,
open(STDERR, ">&STDOUT");
close(STDIN);
open(STDIN, "<&RH");
- exec("$BinDir/BackupPC_tarExtract '$host' '$shareName'"
- . " $Conf{CompressLevel}");
+ alarm(0);
+ exec("$BinDir/BackupPC_tarExtract", $client, $shareName,
+ $Conf{CompressLevel});
print(LOG $bpc->timeStamp,
"can't exec $BinDir/BackupPC_tarExtract\n");
exit(0);
# We need to create the NewFileList output file
#
local(*NEW_FILES);
- open(NEW_FILES, ">$TopDir/pc/$host/NewFileList")
- || die("can't open $TopDir/pc/$host/NewFileList");
+ open(NEW_FILES, ">", "$TopDir/pc/$client/NewFileList")
+ || die("can't open $TopDir/pc/$client/NewFileList");
$newFilesFH = *NEW_FILES;
+ binmode(NEW_FILES);
}
#
#
$xfer->args({
host => $host,
+ client => $client,
hostIP => $hostIP,
shareName => $shareName,
pipeRH => *RH,
lastFullBkupNum => $lastFullBkupNum,
backups => \@Backups,
compress => $Conf{CompressLevel},
- XferMethod => => $Conf{XferMethod},
+ XferMethod => $Conf{XferMethod},
+ logLevel => $Conf{XferLogLevel},
+ pidHandler => \&pidHandler,
+ partialNum => $partialNum,
});
if ( !defined($logMsg = $xfer->start()) ) {
print(LOG $bpc->timeStamp, "xfer start failed: ", $xfer->errStr, "\n");
print("dump failed: ", $xfer->errStr, "\n");
- print("link $host\n") if ( $needLink );
+ print("link $clientURI\n") if ( $needLink );
#
# kill off the tar process, first nicely then forcefully
#
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
+ sleep(1);
+ kill($bpc->sigName2num("KILL"), $tarPid);
+ }
+ if ( @xferPid ) {
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
+ UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
exit(1);
}
- $xferPid = $xfer->xferPid;
+ @xferPid = $xfer->xferPid;
+
if ( $useTar ) {
#
# The parent must close both handles on the pipe since the children
#
close(RH);
close(WH);
- print(LOG $bpc->timeStamp, $logMsg,
- " (xferPid=$xferPid, tarPid=$tarPid)\n");
- } elsif ( $xferPid > 0 ) {
- print(LOG $bpc->timeStamp, $logMsg, " (xferPid=$xferPid)\n");
- } else {
- print(LOG $bpc->timeStamp, $logMsg, "\n");
}
- print("started $type dump, pid=$xferPid, tarPid=$tarPid,"
- . " share=$shareName\n");
+ print(LOG $bpc->timeStamp, $logMsg, "\n");
+ print("started $type dump, share=$shareName\n");
- if ( $useTar || $xferPid > 0 ) {
+ pidHandler(@xferPid);
+
+ if ( $useTar ) {
#
# Parse the output of the transfer program and BackupPC_tarExtract
# while they run. Since we might be reading from two or more children
while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
$_ = $1;
$tarOut = $2;
- $XferLOG->write(\"tarExtract: $_\n");
+ if ( /^ / ) {
+ $XferLOG->write(\"$_\n");
+ } else {
+ $XferLOG->write(\"tarExtract: $_\n");
+ }
+ if ( /^BackupPC_tarExtact aborting \((.*)\)/ ) {
+ $stat{hostError} = $1;
+ }
if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) {
- $tarErrs = $1;
- $nFilesExist = $2;
- $sizeExist = $3;
- $sizeExistComp = $4;
- $nFilesTotal = $5;
- $sizeTotal = $6;
+ $tarErrs += $1;
+ $nFilesExist += $2;
+ $sizeExist += $3;
+ $sizeExistComp += $4;
+ $nFilesTotal += $5;
+ $sizeTotal += $6;
}
}
}
# the transfer.
#
if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) {
- $stat{hostError} = $errMsg;
+ $stat{hostError} = $errMsg if ( $stat{hostError} eq "" );
last SCAN;
}
}
#
# otherwise the xfer module does everything for us
#
- ($tarErrs, $nFilesExist, $sizeExist, $sizeExistComp,
- $nFilesTotal, $sizeTotal) = $xfer->run();
+ my @results = $xfer->run();
+ $tarErrs += $results[0];
+ $nFilesExist += $results[1];
+ $sizeExist += $results[2];
+ $sizeExistComp += $results[3];
+ $nFilesTotal += $results[4];
+ $sizeTotal += $results[5];
}
#
# Merge the xfer status (need to accumulate counts)
#
my $newStat = $xfer->getStats;
+ if ( $newStat->{fileCnt} == 0 ) {
+ $noFilesErr ||= "No files dumped for share $shareName";
+ }
foreach my $k ( (keys(%stat), keys(%$newStat)) ) {
next if ( !defined($newStat->{$k}) );
if ( $k =~ /Cnt$/ ) {
#
# kill off the tranfer program, first nicely then forcefully
#
- if ( $xferPid > 0 ) {
- kill(2, $xferPid);
+ if ( @xferPid ) {
+ kill($bpc->sigName2num("INT"), @xferPid);
sleep(1);
- kill(9, $xferPid);
+ kill($bpc->sigName2num("KILL"), @xferPid);
}
#
# kill off the tar process, first nicely then forcefully
#
if ( $tarPid > 0 ) {
- kill(2, $tarPid);
+ kill($bpc->sigName2num("INT"), $tarPid);
sleep(1);
- kill(9, $tarPid);
+ kill($bpc->sigName2num("KILL"), $tarPid);
}
#
# don't do any more shares on this host
last;
}
}
-$XferLOG->close();
-close($newFilesFH) if ( defined($newFilesFH) );
-my $lastNum = -1;
+#
+# If this is a full, and any share had zero files then consider the dump bad
+#
+if ( $type eq "full" && $stat{hostError} eq ""
+ && length($noFilesErr) && $Conf{BackupZeroFilesIsFatal} ) {
+ $stat{hostError} = $noFilesErr;
+ $stat{xferOK} = 0;
+}
+
+$stat{xferOK} = 0 if ( $Abort );
#
# Do one last check to make sure it is still the machine we expect.
$stat{hostError} = $errMsg;
$stat{xferOK} = 0;
}
-if ( $stat{xferOK} ) {
- @Backups = $bpc->BackupInfoRead($host);
- for ( my $i = 0 ; $i < @Backups ; $i++ ) {
- $lastNum = $Backups[$i]{num} if ( $lastNum < $Backups[$i]{num} );
- }
- $lastNum++;
- $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$lastNum")
- if ( -d "$Dir/$lastNum" );
- if ( !rename("$Dir/new", "$Dir/$lastNum") ) {
- print(LOG $bpc->timeStamp,
- "Rename $Dir/new -> $Dir/$lastNum failed\n");
- $stat{xferOK} = 0;
- }
- rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$lastNum$fileExt");
- rename("$Dir/NewFileList", "$Dir/NewFileList.$lastNum");
-}
+
+UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
+close($newFilesFH) if ( defined($newFilesFH) );
+
my $endTime = time();
#
# If the dump failed, clean up
#
if ( !$stat{xferOK} ) {
- #
- # wait a short while and see if the system is still alive
- #
$stat{hostError} = $stat{lastOutputLine} if ( $stat{hostError} eq "" );
if ( $stat{hostError} ) {
print(LOG $bpc->timeStamp,
"Got fatal error during xfer ($stat{hostError})\n");
+ $XferLOG->write(\"Got fatal error during xfer ($stat{hostError})\n");
}
- sleep(10);
- if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
- $stat{hostAbort} = 1;
- }
- if ( $stat{hostAbort} ) {
- $stat{hostError} = "lost network connection during backup";
- }
- print(LOG $bpc->timeStamp, "Dump aborted ($stat{hostError})\n");
- unlink("$Dir/timeStamp.level0");
- unlink("$Dir/SmbLOG.bad");
- unlink("$Dir/SmbLOG.bad$fileExt");
- unlink("$Dir/XferLOG.bad");
- unlink("$Dir/XferLOG.bad$fileExt");
- unlink("$Dir/NewFileList");
- rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
- $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
- print("dump failed: $stat{hostError}\n");
- print("link $host\n") if ( $needLink );
- exit(1);
-}
-
-#
-# Add the new backup information to the backup file
-#
-@Backups = $bpc->BackupInfoRead($host);
-my $i = @Backups;
-$Backups[$i]{num} = $lastNum;
-$Backups[$i]{type} = $type;
-$Backups[$i]{startTime} = $startTime;
-$Backups[$i]{endTime} = $endTime;
-$Backups[$i]{size} = $sizeTotal;
-$Backups[$i]{nFiles} = $nFilesTotal;
-$Backups[$i]{xferErrs} = $stat{xferErrCnt} || 0;
-$Backups[$i]{xferBadFile} = $stat{xferBadFileCnt} || 0;
-$Backups[$i]{xferBadShare} = $stat{xferBadShareCnt} || 0;
-$Backups[$i]{nFilesExist} = $nFilesExist;
-$Backups[$i]{sizeExist} = $sizeExist;
-$Backups[$i]{sizeExistComp} = $sizeExistComp;
-$Backups[$i]{tarErrs} = $tarErrs;
-$Backups[$i]{compress} = $Conf{CompressLevel};
-$Backups[$i]{noFill} = $type eq "full" ? 0 : 1;
-$Backups[$i]{mangle} = 1; # name mangling always on for v1.04+
-$bpc->BackupInfoWrite($host, @Backups);
-
-unlink("$Dir/timeStamp.level0");
-
-#
-# Now remove the bad files, replacing them if possible with links to
-# earlier backups.
-#
-foreach my $file ( $xfer->getBadFiles ) {
- my $j;
- unlink("$Dir/$lastNum/$file");
- for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
- next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
- if ( !link("$Dir/$Backups[$j]{num}/$file", "$Dir/$lastNum/$file") ) {
- print(LOG $bpc->timeStamp,
- "Unable to link $lastNum/$file to"
- . " $Backups[$j]{num}/$file\n");
- } else {
- print(LOG $bpc->timeStamp,
- "Bad file $lastNum/$file replaced by link to"
- . " $Backups[$j]{num}/$file\n");
- }
- last;
- }
- if ( $j < 0 ) {
- print(LOG $bpc->timeStamp,
- "Removed bad file $lastNum/$file (no older"
- . " copy to link to)\n");
+ if ( !$Abort ) {
+ #
+ # wait a short while and see if the system is still alive
+ #
+ sleep(5);
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ $stat{hostAbort} = 1;
+ }
+ if ( $stat{hostAbort} ) {
+ $stat{hostError} = "lost network connection during backup";
+ }
+ print(LOG $bpc->timeStamp, "Backup aborted ($stat{hostError})\n");
+ $XferLOG->write(\"Backup aborted ($stat{hostError})\n");
+ } else {
+ $XferLOG->write(\"Backup aborted by user signal\n");
}
+
+ #
+ # Close the log file and call BackupFailCleanup, which exits.
+ #
+ BackupFailCleanup();
}
+my $newNum = BackupSave();
+
my $otherCount = $stat{xferErrCnt} - $stat{xferBadFileCnt}
- $stat{xferBadShareCnt};
print(LOG $bpc->timeStamp,
- "$type backup $lastNum complete, $stat{fileCnt} files,"
+ "$type backup $newNum complete, $stat{fileCnt} files,"
. " $stat{byteCnt} bytes,"
. " $stat{xferErrCnt} xferErrs ($stat{xferBadFileCnt} bad files,"
. " $stat{xferBadShareCnt} bad shares, $otherCount other)\n");
-BackupExpire($host);
+BackupExpire($client);
print("$type backup complete\n");
my($needLink) = @_;
print("nothing to do\n");
- print("link $host\n") if ( $needLink );
+ print("link $clientURI\n") if ( $needLink );
exit(0);
}
sub catch_signal
{
-    my $signame = shift;
-    my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+    my $sigName = shift;
+
+    #
+    # Signal handler installed for INT/ALRM/TERM/PIPE etc in the parent;
+    # the forked xfer and tarExtract children inherit it.  First signal
+    # requests a graceful abort; a second signal forces cleanup and exit.
+    #
-    print(LOG $bpc->timeStamp, "cleaning up after signal $signame\n");
-    $XferLOG->write(\"exiting after signal $signame\n");
-    $XferLOG->close();
-    if ( $xferPid > 0 ) {
-        if ( kill(2, $xferPid) <= 0 ) {
-            sleep(1);
-            kill(9, $xferPid);
-        }
+    #
+    # The first time we receive a signal we try to gracefully
+    # abort the backup.  This allows us to keep a partial dump
+    # with the in-progress file deleted and attribute caches
+    # flushed to disk etc.
+    #
+    if ( !length($SigName) ) {
+        my $reason;
+        if ( $sigName eq "INT" ) {
+            $reason = "aborted by user (signal=$sigName)";
+        } else {
+            $reason = "aborted by signal=$sigName";
+        }
+        if ( $Pid == $$ ) {
+            #
+            # Parent logs a message
+            #
+            print(LOG $bpc->timeStamp,
+                      "Aborting backup after signal $sigName\n");
+
+            #
+            # Tell xfer to abort
+            #
+            $xfer->abort($reason);
+
+            #
+            # Send ALRMs to BackupPC_tarExtract if we are using it
+            #
+            if ( $tarPid > 0 ) {
+                kill($bpc->sigName2num("ALRM"), $tarPid);
+            }
+
+            #
+            # Schedule a 20 second timer in case the clean
+            # abort doesn't complete
+            #
+            alarm(20);
+        } else {
+            #
+            # Children ignore anything other than ALRM and INT
+            #
+            if ( $sigName ne "ALRM" && $sigName ne "INT" ) {
+                return;
+            }
+
+            #
+            # The child also tells xfer to abort
+            #
+            $xfer->abort($reason);
+
+            #
+            # Schedule a 15 second timer in case the clean
+            # abort doesn't complete
+            #
+            alarm(15);
+        }
+        $SigName = $sigName;
+        $Abort = 1;
+        return;
+    }
+
+    #
+    # This is a second signal: time to clean up.
+    #
+    if ( $Pid != $$ && ($sigName eq "ALRM" || $sigName eq "INT") ) {
+        #
+        # Children quit quietly on ALRM or INT
+        #
+        exit(1);
+    }
+
+    #
+    # Ignore other signals in children
+    #
+    return if ( $Pid != $$ );
+
+    $SIG{$sigName} = 'IGNORE';
+    UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
+    $XferLOG->write(\"exiting after signal $sigName\n");
+    if ( @xferPid ) {
+        kill($bpc->sigName2num("INT"), @xferPid);
+        sleep(1);
+        kill($bpc->sigName2num("KILL"), @xferPid);
    }
    if ( $tarPid > 0 ) {
-        if ( kill(2, $tarPid) <= 0 ) {
-            sleep(1);
-            kill(9, $tarPid);
-        }
+        kill($bpc->sigName2num("INT"), $tarPid);
+        sleep(1);
+        kill($bpc->sigName2num("KILL"), $tarPid);
    }
-    unlink("$Dir/timeStamp.level0");
-    unlink("$Dir/NewFileList");
-    unlink("$Dir/XferLOG.bad");
-    unlink("$Dir/XferLOG.bad$fileExt");
-    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
-    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
-    if ( $signame eq "INT" ) {
-        print("dump failed: aborted by user (signal=$signame)\n");
+    if ( $sigName eq "INT" ) {
+        $stat{hostError} = "aborted by user (signal=$sigName)";
    } else {
-        print("dump failed: received signal=$signame\n");
+        $stat{hostError} = "received signal=$sigName";
    }
-    print("link $host\n") if ( $needLink );
-    exit(1);
+    BackupFailCleanup();
+}
+
+#
+# File::Find callback used to decide whether a failed dump left any
+# real files behind in $Dir/new.  Counts plain files into the global
+# $nFilesTotal, skipping paths ending in "fattrib" (presumably the
+# mangled per-directory attribute files -- TODO confirm), and prunes
+# the walk once any file has been seen, since one file is enough.
+#
+sub CheckForNewFiles
+{
+    # "_" reuses the stat() result from File::Find's own lstat of
+    # the current entry, avoiding a second stat call.
+    if ( -f _ && $File::Find::name !~ /\/fattrib$/ ) {
+        $nFilesTotal++;
+    } elsif ( -d _ ) {
+        #
+        # No need to check entire tree
+        #
+        $File::Find::prune = 1 if ( $nFilesTotal );
+    }
+}
+
+#
+# Called when a dump has failed.  Decides whether anything useful was
+# transferred: a failed full that actually copied some files is kept
+# as a "partial" dump (via BackupSave), unless partials are disabled
+# with $Conf{PartialAgeMax} < 0.  Otherwise every trace of the dump
+# (new/ tree, XferLOG, NewFileList, timestamps) is removed or moved
+# to trash.  Always exits: status 1 on discard, 2 on saved partial.
+#
+sub BackupFailCleanup
+{
+    my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+    my $keepPartial = 0;
+
+    #
+    # We keep this backup if it is a full and we actually backed
+    # up some files.
+    #
+    if ( $type eq "full" ) {
+        if ( $nFilesTotal == 0 && $xfer->getStats->{fileCnt} == 0 ) {
+            #
+            # Xfer didn't report any files, but check in the new
+            # directory just in case.  Guard against the directory
+            # not existing (dump may have failed before creating it).
+            #
+            find(\&CheckForNewFiles, "$Dir/new") if ( -d "$Dir/new" );
+            $keepPartial = 1 if ( $nFilesTotal );
+        } else {
+            #
+            # Xfer reported some files
+            #
+            $keepPartial = 1;
+        }
+    }
+
+    #
+    # Don't keep partials if they are disabled
+    #
+    $keepPartial = 0 if ( $Conf{PartialAgeMax} < 0 );
+
+    if ( !$keepPartial ) {
+        #
+        # No point in saving this dump; get rid of everything.
+        # Close the log before renaming it to XferLOG.bad.
+        #
+        $XferLOG->close();
+        unlink("$Dir/timeStamp.level0") if ( -f "$Dir/timeStamp.level0" );
+        unlink("$Dir/SmbLOG.bad") if ( -f "$Dir/SmbLOG.bad" );
+        unlink("$Dir/SmbLOG.bad$fileExt") if ( -f "$Dir/SmbLOG.bad$fileExt" );
+        unlink("$Dir/XferLOG.bad") if ( -f "$Dir/XferLOG.bad" );
+        unlink("$Dir/XferLOG.bad$fileExt") if ( -f "$Dir/XferLOG.bad$fileExt" );
+        unlink("$Dir/NewFileList") if ( -f "$Dir/NewFileList" );
+        rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
+        $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
+        print("dump failed: $stat{hostError}\n");
+        print("link $clientURI\n") if ( $needLink );
+        exit(1);
+    }
+    #
+    # Ok, now we should save this as a partial dump
+    #
+    $type = "partial";
+    my $newNum = BackupSave();
+    print("dump failed: $stat{hostError}\n");
+    print("link $clientURI\n") if ( $needLink );
+    print(LOG $bpc->timeStamp, "Saved partial dump $newNum\n");
+    exit(2);
+}
#
#
sub BackupExpire
{
- my($host) = @_;
- my($Dir) = "$TopDir/pc/$host";
- my(@Backups) = $bpc->BackupInfoRead($host);
+ my($client) = @_;
+ my($Dir) = "$TopDir/pc/$client";
+ my(@Backups) = $bpc->BackupInfoRead($client);
my($cntFull, $cntIncr, $firstFull, $firstIncr, $oldestIncr, $oldestFull);
+ if ( $Conf{FullKeepCnt} <= 0 ) {
+ print(LOG $bpc->timeStamp,
+ "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n");
+ print(STDERR
+ "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n")
+ if ( $opts{v} );
+ return;
+ }
while ( 1 ) {
$cntFull = $cntIncr = 0;
$oldestIncr = $oldestFull = 0;
- for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
if ( $Backups[$i]{type} eq "full" ) {
$firstFull = $i if ( $cntFull == 0 );
$cntFull++;
#
print(LOG $bpc->timeStamp,
"removing incr backup $Backups[$firstIncr]{num}\n");
- $bpc->RmTreeDefer("$TopDir/trash",
- "$Dir/$Backups[$firstIncr]{num}");
- unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}")
- if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}" );
- unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}.z")
- if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}.z" );
- unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}")
- if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}" );
- unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}.z")
- if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}.z" );
- splice(@Backups, $firstIncr, 1);
- } elsif ( ($cntFull > $Conf{FullKeepCnt}
- || ($cntFull > $Conf{FullKeepCntMin}
- && $oldestFull > $Conf{FullAgeMax}))
+ BackupRemove($client, \@Backups, $firstIncr);
+ next;
+ }
+
+ #
+ # Delete any old full backups, according to $Conf{FullKeepCntMin}
+ # and $Conf{FullAgeMax}.
+ #
+ # First make sure that $Conf{FullAgeMax} is at least bigger
+ # than $Conf{FullPeriod} * $Conf{FullKeepCnt}, including
+ # the exponential array case.
+ #
+ my $fullKeepCnt = $Conf{FullKeepCnt};
+ $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+ my $fullAgeMax;
+ my $fullPeriod = int(0.5 + $Conf{FullPeriod});
+ for ( my $i = 0 ; $i < @$fullKeepCnt ; $i++ ) {
+ $fullAgeMax += $fullKeepCnt->[$i] * $fullPeriod;
+ $fullPeriod *= 2;
+ }
+ $fullAgeMax += $fullPeriod; # add some buffer
+
+ if ( $cntFull > $Conf{FullKeepCntMin}
+ && $oldestFull > $Conf{FullAgeMax}
+ && $oldestFull > $fullAgeMax
+ && $Conf{FullKeepCntMin} > 0
+ && $Conf{FullAgeMax} > 0
&& (@Backups <= $firstFull + 1
|| !$Backups[$firstFull + 1]{noFill}) ) {
#
# filled.)
#
print(LOG $bpc->timeStamp,
- "removing full backup $Backups[$firstFull]{num}\n");
- $bpc->RmTreeDefer("$TopDir/trash",
- "$Dir/$Backups[$firstFull]{num}");
- unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}")
- if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}" );
- unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}.z")
- if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}.z" );
- unlink("$Dir/XferLOG.$Backups[$firstFull]{num}")
- if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}" );
- unlink("$Dir/XferLOG.$Backups[$firstFull]{num}.z")
- if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}.z" );
- splice(@Backups, $firstFull, 1);
- } else {
+ "removing old full backup $Backups[$firstFull]{num}\n");
+ BackupRemove($client, \@Backups, $firstFull);
+ next;
+ }
+
+ #
+ # Do new-style full backup expiry, which includes the case
+ # where $Conf{FullKeepCnt} is an array.
+ #
+ last if ( !BackupFullExpire($client, \@Backups) );
+ }
+ $bpc->BackupInfoWrite($client, @Backups);
+}
+
+#
+# Handle full backup expiry, using exponential periods.
+#
+sub BackupFullExpire
+{
+ my($client, $Backups) = @_;
+ my $fullCnt = 0;
+ my $fullPeriod = $Conf{FullPeriod};
+ my $origFullPeriod = $fullPeriod;
+ my $fullKeepCnt = $Conf{FullKeepCnt};
+ my $fullKeepIdx = 0;
+ my(@delete, @fullList);
+
+ #
+ # Don't delete anything if $Conf{FullPeriod} or $Conf{FullKeepCnt} are
+ # not defined - possibly a corrupted config.pl file.
+ #
+ return if ( !defined($Conf{FullPeriod}) || !defined($Conf{FullKeepCnt}) );
+
+ #
+ # If regular backups are still disabled with $Conf{FullPeriod} < 0,
+ # we still expire backups based on a typical FullPeriod value - weekly.
+ #
+ $fullPeriod = 7 if ( $fullPeriod <= 0 );
+
+ $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+
+ for ( my $i = 0 ; $i < @$Backups ; $i++ ) {
+ next if ( $Backups->[$i]{type} ne "full" );
+ push(@fullList, $i);
+ }
+ for ( my $k = @fullList - 1 ; $k >= 0 ; $k-- ) {
+ my $i = $fullList[$k];
+ my $prevFull = $fullList[$k-1] if ( $k > 0 );
+ #
+ # Don't delete any full that is followed by an unfilled backup,
+ # since it is needed for restore.
+ #
+ my $noDelete = $i + 1 < @$Backups ? $Backups->[$i+1]{noFill} : 0;
+
+ if ( !$noDelete &&
+ ($fullKeepIdx >= @$fullKeepCnt
+ || $k > 0
+ && $fullKeepIdx > 0
+ && $Backups->[$i]{startTime} - $Backups->[$prevFull]{startTime}
+ < ($fullPeriod - $origFullPeriod / 2) * 24 * 3600
+ )
+ ) {
+ #
+ # Delete the full backup
+ #
+ #printf("Deleting backup $i ($prevFull)\n");
+ push(@delete, $i);
+ } else {
+ $fullCnt++;
+ while ( $fullKeepIdx < @$fullKeepCnt
+ && $fullCnt >= $fullKeepCnt->[$fullKeepIdx] ) {
+ $fullKeepIdx++;
+ $fullCnt = 0;
+ $fullPeriod = 2 * $fullPeriod;
+ }
+ }
+ }
+ #
+ # Now actually delete the backups
+ #
+ for ( my $i = @delete - 1 ; $i >= 0 ; $i-- ) {
+ print(LOG $bpc->timeStamp,
+ "removing full backup $Backups->[$delete[$i]]{num}\n");
+ BackupRemove($client, $Backups, $delete[$i]);
+ }
+ return @delete;
+}
+
+#
+# Removes any partial backups
+#
+sub BackupPartialRemove
+{
+ my($client, $Backups) = @_;
+
+ for ( my $i = @$Backups - 1 ; $i >= 0 ; $i-- ) {
+ next if ( $Backups->[$i]{type} ne "partial" );
+ BackupRemove($client, $Backups, $i);
+ }
+}
+
+sub BackupSave
+{
+ my @Backups = $bpc->BackupInfoRead($client);
+ my $num = -1;
+
+ #
+ # Since we got a good backup we should remove any partial dumps
+ # (the new backup might also be a partial, but that's ok).
+ #
+ BackupPartialRemove($client, \@Backups);
+
+ #
+ # Number the new backup
+ #
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ $num = $Backups[$i]{num} if ( $num < $Backups[$i]{num} );
+ }
+ $num++;
+ $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$num") if ( -d "$Dir/$num" );
+ if ( !rename("$Dir/new", "$Dir/$num") ) {
+ print(LOG $bpc->timeStamp, "Rename $Dir/new -> $Dir/$num failed\n");
+ $stat{xferOK} = 0;
+ }
+ $needLink = 1 if ( -f "$Dir/NewFileList" );
+
+ #
+ # Add the new backup information to the backup file
+ #
+ my $i = @Backups;
+ $Backups[$i]{num} = $num;
+ $Backups[$i]{type} = $type;
+ $Backups[$i]{startTime} = $startTime;
+ $Backups[$i]{endTime} = $endTime;
+ $Backups[$i]{size} = $sizeTotal;
+ $Backups[$i]{nFiles} = $nFilesTotal;
+ $Backups[$i]{xferErrs} = $stat{xferErrCnt} || 0;
+ $Backups[$i]{xferBadFile} = $stat{xferBadFileCnt} || 0;
+ $Backups[$i]{xferBadShare} = $stat{xferBadShareCnt} || 0;
+ $Backups[$i]{nFilesExist} = $nFilesExist;
+ $Backups[$i]{sizeExist} = $sizeExist;
+ $Backups[$i]{sizeExistComp} = $sizeExistComp;
+ $Backups[$i]{tarErrs} = $tarErrs;
+ $Backups[$i]{compress} = $Conf{CompressLevel};
+ $Backups[$i]{noFill} = $type eq "incr" ? 1 : 0;
+ $Backups[$i]{level} = $type eq "incr" ? 1 : 0;
+ $Backups[$i]{mangle} = 1; # name mangling always on for v1.04+
+ $bpc->BackupInfoWrite($client, @Backups);
+
+ unlink("$Dir/timeStamp.level0") if ( -f "$Dir/timeStamp.level0" );
+ foreach my $ext ( qw(bad bad.z) ) {
+ next if ( !-f "$Dir/XferLOG.$ext" );
+ unlink("$Dir/XferLOG.$ext.old") if ( -f "$Dir/XferLOG.$ext" );
+ rename("$Dir/XferLOG.$ext", "$Dir/XferLOG.$ext.old");
+ }
+
+ #
+ # Now remove the bad files, replacing them if possible with links to
+ # earlier backups.
+ #
+ foreach my $f ( $xfer->getBadFiles ) {
+ my $j;
+ my $shareM = $bpc->fileNameEltMangle($f->{share});
+ my $fileM = $bpc->fileNameMangle($f->{file});
+ unlink("$Dir/$num/$shareM/$fileM");
+ for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
+ my $file;
+ if ( $Backups[$j]{mangle} ) {
+ $file = "$shareM/$fileM";
+ } else {
+ $file = "$f->{share}/$f->{file}";
+ }
+ next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
+ if ( !link("$Dir/$Backups[$j]{num}/$file",
+ "$Dir/$num/$shareM/$fileM") ) {
+ my $str = \"Unable to link $num/$f->{share}/$f->{file} to"
+ . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+ $XferLOG->write(\$str);
+ } else {
+ my $str = "Bad file $num/$f->{share}/$f->{file} replaced"
+ . " by link to"
+ . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+ $XferLOG->write(\$str);
+ }
last;
}
+ if ( $j < 0 ) {
+ my $str = "Removed bad file $num/$f->{share}/$f->{file}"
+ . " (no older copy to link to)\n";
+ $XferLOG->write(\$str);
+ }
}
- $bpc->BackupInfoWrite($host, @Backups);
+ $XferLOG->close();
+ rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$num$fileExt");
+ rename("$Dir/NewFileList", "$Dir/NewFileList.$num");
+
+ return $num;
+}
+
+#
+# Removes a specific backup
+#
+sub BackupRemove
+{
+ my($client, $Backups, $idx) = @_;
+ my($Dir) = "$TopDir/pc/$client";
+
+ $bpc->RmTreeDefer("$TopDir/trash",
+ "$Dir/$Backups->[$idx]{num}");
+ unlink("$Dir/SmbLOG.$Backups->[$idx]{num}")
+ if ( -f "$Dir/SmbLOG.$Backups->[$idx]{num}" );
+ unlink("$Dir/SmbLOG.$Backups->[$idx]{num}.z")
+ if ( -f "$Dir/SmbLOG.$Backups->[$idx]{num}.z" );
+ unlink("$Dir/XferLOG.$Backups->[$idx]{num}")
+ if ( -f "$Dir/XferLOG.$Backups->[$idx]{num}" );
+ unlink("$Dir/XferLOG.$Backups->[$idx]{num}.z")
+ if ( -f "$Dir/XferLOG.$Backups->[$idx]{num}.z" );
+ splice(@{$Backups}, $idx, 1);
}
sub CorrectHostCheck
{
my($hostIP, $host) = @_;
- return if ( $hostIP eq $host && !$Conf{FixedIPNetBiosNameCheck} );
+ return if ( $hostIP eq $host && !$Conf{FixedIPNetBiosNameCheck}
+ || $Conf{NmbLookupCmd} eq "" );
my($netBiosHost, $netBiosUser) = $bpc->NetBiosInfoGet($hostIP);
return "host $host has mismatching netbios name $netBiosHost"
- if ( $netBiosHost ne $host );
+ if ( $netBiosHost ne $host );
return;
}
+
+#
+# The Xfer method might tell us from time to time about processes
+# it forks. We tell BackupPC about this (for status displays) and
+# keep track of the pids in case we cancel the backup
+#
+sub pidHandler
+{
+ @xferPid = @_;
+ @xferPid = grep(/./, @xferPid);
+ return if ( !@xferPid && $tarPid < 0 );
+ my @pids = @xferPid;
+ push(@pids, $tarPid) if ( $tarPid > 0 );
+ my $str = join(",", @pids);
+ $XferLOG->write(\"Xfer PIDs are now $str\n") if ( defined($XferLOG) );
+ print("xferPids $str\n");
+}
+
+#
+# Run an optional pre- or post-dump command
+#
+sub UserCommandRun
+{
+ my($cmdType) = @_;
+
+ return if ( !defined($Conf{$cmdType}) );
+ my $vars = {
+ xfer => $xfer,
+ client => $client,
+ host => $host,
+ hostIP => $hostIP,
+ user => $Hosts->{$client}{user},
+ moreUsers => $Hosts->{$client}{moreUsers},
+ share => $ShareNames->[0],
+ shares => $ShareNames,
+ XferMethod => $Conf{XferMethod},
+ sshPath => $Conf{SshPath},
+ LOG => *LOG,
+ XferLOG => $XferLOG,
+ stat => \%stat,
+ xferOK => $stat{xferOK} || 0,
+ hostError => $stat{hostError},
+ type => $type,
+ cmdType => $cmdType,
+ };
+ my $cmd = $bpc->cmdVarSubstitute($Conf{$cmdType}, $vars);
+ $XferLOG->write(\"Executing $cmdType: @$cmd\n");
+ #
+ # Run the user's command, dumping the stdout/stderr into the
+ # Xfer log file. Also supply the optional $vars and %Conf in
+ # case the command is really perl code instead of a shell
+ # command.
+ #
+ $bpc->cmdSystemOrEval($cmd,
+ sub {
+ $XferLOG->write(\$_[0]);
+ },
+ $vars, \%Conf);
+}