display all backups whose state we don't know in the web interface
[BackupPC.git] / bin / BackupPC_dump
index 0b46c90..cc633f9 100755 (executable)
@@ -1,11 +1,11 @@
-#!/bin/perl -T
+#!/usr/bin/perl
 #============================================================= -*-perl-*-
 #
 # BackupPC_dump: Dump a single client.
 #
 # DESCRIPTION
 #
-#   Usage: BackupPC_dump [-i] [-f] [-d] [-e] <client>
+#   Usage: BackupPC_dump [-i] [-f] [-F] [-I] [-d] [-e] [-v] <client>
 #
 #   Flags:
 #
 #
 #     -f   Do a full dump, overriding any scheduling.
 #
+#     -I   Do an incremental dump if the regular schedule requires a
+#          full or incremental, otherwise do nothing (a full is done
+#          if no dumps have yet succeeded)
+#
+#     -F   Do a full dump if the regular schedule requires a
+#          full or incremental, otherwise do nothing
+#
 #     -d   Host is a DHCP pool address, and the client argument
 #          just an IP address.  We lookup the NetBios name from
 #          the IP address.
 #          dhcp hosts that are no longer on the network will not expire
 #          old backups.
 #
+#     -v   Verbose.  For manual usage: prints failure reasons in more detail.
+#
 #   BackupPC_dump is run periodically by BackupPC to backup $client.
 #   The file $TopDir/pc/$client/backups is read to decide whether a
 #   full or incremental backup needs to be run.  If no backup is
 #   scheduled, or a ping to $client fails, then BackupPC_dump quits.
 #
-#   The backup is done using the selected XferMethod (smb, tar, rsync etc),
-#   extracting the dump into $TopDir/pc/$client/new.  The xfer output is
-#   put into $TopDir/pc/$client/XferLOG.
+#   The backup is done using the selected XferMethod (smb, tar, rsync,
+#   backuppcd etc), extracting the dump into $TopDir/pc/$client/new.
+#   The xfer output is put into $TopDir/pc/$client/XferLOG.
 #
 #   If the dump succeeds (based on parsing the output of the XferMethod):
 #     - $TopDir/pc/$client/new is renamed to $TopDir/pc/$client/nnn, where
@@ -50,7 +59,7 @@
 #   Craig Barratt  <cbarratt@users.sourceforge.net>
 #
 # COPYRIGHT
-#   Copyright (C) 2001  Craig Barratt
+#   Copyright (C) 2001-2009  Craig Barratt
 #
 #   This program is free software; you can redistribute it and/or modify
 #   it under the terms of the GNU General Public License as published by
 #
 #========================================================================
 #
-# Version 2.0.0_CVS, released 18 Jan 2003.
+# Version 3.2.0, released 31 Jul 2010.
 #
 # See http://backuppc.sourceforge.net.
 #
 #========================================================================
 
 use strict;
+no  utf8;
 use lib "/usr/local/BackupPC/lib";
 use BackupPC::Lib;
 use BackupPC::FileZIO;
-use BackupPC::Xfer::Smb;
-use BackupPC::Xfer::Tar;
-use BackupPC::Xfer::Rsync;
+use BackupPC::Storage;
+use BackupPC::Xfer;
+use Encode;
+use Socket;
 use File::Path;
+use File::Find;
 use Getopt::Std;
 
 ###########################################################################
@@ -93,25 +105,34 @@ my $TopDir = $bpc->TopDir();
 my $BinDir = $bpc->BinDir();
 my %Conf   = $bpc->Conf();
 my $NeedPostCmd;
+my $Hosts;
+my $SigName;
+my $Abort;
 
 $bpc->ChildInit();
 
 my %opts;
-getopts("defi", \%opts);
-if ( @ARGV != 1 ) {
-    print("usage: $0 [-d] [-e] [-f] [-i] <client>\n");
+if ( !getopts("defivFI", \%opts) || @ARGV != 1 ) {
+    print("usage: $0 [-d] [-e] [-f] [-i] [-F] [-I] [-v] <client>\n");
     exit(1);
 }
-if ( $ARGV[0] !~ /^([\w\.-\s]+)$/ ) {
+if ( $ARGV[0] !~ /^([\w\.\s-]+)$/ ) {
     print("$0: bad client name '$ARGV[0]'\n");
     exit(1);
 }
+if ( (defined($opts{f}) + defined($opts{i}) + defined($opts{F}) + defined($opts{I})) > 1 ) {
+    print("$0: exiting because you can only use one of -f, -i, -F, and -I\n");
+    exit(1);
+}
+
 my $client = $1;   # BackupPC's client name (might not be real host name)
 my $hostIP;        # this is the IP address
 my $host;          # this is the real host name
 
 my($clientURI, $user);
 
+$bpc->verbose(1) if ( $opts{v} );
+
 if ( $opts{d} ) {
     #
     # The client name $client is simply a DHCP address.  We need to check
@@ -119,17 +140,37 @@ if ( $opts{d} ) {
     # host name via NetBios using nmblookup.
     #
     $hostIP = $client;
-    exit(1) if ( $bpc->CheckHostAlive($hostIP) < 0 );
+    if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+       print(STDERR "Exiting because CheckHostAlive($hostIP) failed\n")
+                           if ( $opts{v} );
+       exit(1);
+    }
+    if ( $Conf{NmbLookupCmd} eq "" ) {
+       print(STDERR "Exiting because \$Conf{NmbLookupCmd} is empty\n")
+                           if ( $opts{v} );
+       exit(1);
+    }
     ($client, $user) = $bpc->NetBiosInfoGet($hostIP);
-    exit(1) if ( $host !~ /^([\w\.-]+)$/ );
-    my $hosts = $bpc->HostInfoRead($client);
-    exit(1) if ( !defined($hosts->{$client}) );
+    if ( $client !~ /^([\w\.\s-]+)$/ ) {
+       print(STDERR "Exiting because NetBiosInfoGet($hostIP) returned"
+                   . " '$client', an invalid host name\n") if ( $opts{v} );
+       exit(1)
+    }
+    $Hosts = $bpc->HostInfoRead($client);
     $host = $client;
+} else {
+    $Hosts = $bpc->HostInfoRead($client);
+}
+if ( !defined($Hosts->{$client}) ) {
+    print(STDERR "Exiting because host $client does not exist in the"
+               . " hosts file\n") if ( $opts{v} );
+    exit(1)
 }
 
 my $Dir     = "$TopDir/pc/$client";
-my $xferPid = -1;
+my @xferPid = ();
 my $tarPid  = -1;
+my $completionPercent;
 
 #
 # Re-read config file, so we can include the PC-specific config
@@ -147,6 +188,11 @@ if ( defined(my $error = $bpc->ConfigRead($client)) ) {
 $SIG{INT}  = \&catch_signal;
 $SIG{ALRM} = \&catch_signal;
 $SIG{TERM} = \&catch_signal;
+$SIG{PIPE} = \&catch_signal;
+$SIG{STOP} = \&catch_signal;
+$SIG{TSTP} = \&catch_signal;
+$SIG{TTIN} = \&catch_signal;
+my $Pid = $$;
 
 #
 # Make sure we eventually timeout if there is no activity from
@@ -158,38 +204,31 @@ mkpath($Dir, 0, 0777) if ( !-d $Dir );
 if ( !-f "$Dir/LOCK" ) {
     open(LOCK, ">", "$Dir/LOCK") && close(LOCK);
 }
-open(LOG, ">>", "$Dir/LOG");
-select(LOG); $| = 1; select(STDOUT);
 
-if ( !$opts{d} ) {
+my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+my $logPath = sprintf("$Dir/LOG.%02d%04d", $mon + 1, $year + 1900);
+
+if ( !-f $logPath ) {
     #
-    # In the non-DHCP case, make sure the host can be looked up
-    # via NS, or otherwise find the IP address via NetBios.
+    # Compress and prune old log files
     #
-    if ( $Conf{ClientNameAlias} ne "" ) {
-        $host = $Conf{ClientNameAlias};
-    } else {
-        $host = $client;
-    }
-    if ( !defined(gethostbyname($host)) ) {
-        #
-        # Ok, NS doesn't know about it.  Maybe it is a NetBios name
-        # instead.
-        #
-        if ( !defined($hostIP = $bpc->NetBiosHostIPFind($host)) ) {
-           print(LOG $bpc->timeStamp,
-                           "dump failed: Can't find host $host\n");
-            print("dump failed: Can't find host $host\n");
-            exit(1);
+    my $lastLog = $Conf{MaxOldPerPCLogFiles} - 1;
+    foreach my $file ( $bpc->sortedPCLogFiles($client) ) {
+        if ( $lastLog <= 0 ) {
+            unlink($file);
+            next;
         }
-    } else {
-        $hostIP = $host;
+        $lastLog--;
+        next if ( $file =~ /\.z$/ || !$Conf{CompressLevel} );
+        BackupPC::FileZIO->compressCopy($file,
+                                        "$file.z",
+                                        undef,
+                                        $Conf{CompressLevel}, 1);
     }
 }
 
-###########################################################################
-# Figure out what to do and do it
-###########################################################################
+open(LOG, ">>", $logPath);
+select(LOG); $| = 1; select(STDOUT);
 
 #
 # For the -e option we just expire backups and quit
@@ -199,6 +238,19 @@ if ( $opts{e} ) {
     exit(0);
 }
 
+#
+# For archive hosts we don't bother any further
+#
+if ($Conf{XferMethod} eq "archive" ) {
+    print(STDERR "Exiting because the XferMethod is set to archive\n")
+                if ( $opts{v} );
+    exit(0);
+}
+
+###########################################################################
+# Figure out what to do and do it
+###########################################################################
+
 #
 # See if we should skip this host during a certain range
 # of times.
@@ -221,17 +273,42 @@ $bpc->ServerDisconnect();
 if ( $opts{d} ) {
     if ( $StatusHost{activeJob} ) {
         # oops, something is already running for this host
+       print(STDERR "Exiting because backup is already running for $client\n")
+                       if ( $opts{v} );
         exit(0);
     }
     print("DHCP $hostIP $clientURI\n");
 }
 
-my($needLink, @Backups, $type, $lastBkupNum, $lastFullBkupNum);
-my $lastFull = 0;
-my $lastIncr = 0;
+my($needLink, @Backups, $type);
+my($incrBaseTime, $incrBaseBkupNum, $incrBaseLevel, $incrLevel);
+my $lastFullTime = 0;
+my $lastIncrTime = 0;
+my $partialIdx = -1;
+my $partialNum;
+my $partialFileCnt;
+my $lastBkupNum;
+my $lastPartial = 0;
+
+#
+# Maintain backward compatibility with $Conf{FullPeriod} == -1 or -2
+# meaning disable backups
+#
+$Conf{BackupsDisable} = -$Conf{FullPeriod}
+            if ( !$Conf{BackupsDisable} && $Conf{FullPeriod} < 0 );
 
-if ( $Conf{FullPeriod} == -1 && !$opts{f} && !$opts{i}
-        || $Conf{FullPeriod} == -2 ) {
+if ( $Conf{BackupsDisable} == 1 && !$opts{f} && !$opts{i}
+        || $Conf{BackupsDisable} == 2 ) {
+    print(STDERR "Exiting because backups are disabled with"
+       . " \$Conf{BackupsDisable} = $Conf{BackupsDisable}\n") if ( $opts{v} );
+    #
+    # Tell BackupPC to ignore old failed backups on hosts that
+    # have backups disabled.
+    #
+    print("backups disabled\n")
+               if ( defined($StatusHost{errorTime})
+                    && $StatusHost{reason} ne "Reason_backup_done"
+                    && time - $StatusHost{errorTime} > 4 * 24 * 3600 );
     NothingToDo($needLink);
 }
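
For reference, the backward-compatibility shim above maps the old $Conf{FullPeriod} sentinel values onto the newer $Conf{BackupsDisable} setting. A minimal sketch of the equivalence implied by the code above:

    # Old config                    New equivalent
    $Conf{FullPeriod} = -1;   #     $Conf{BackupsDisable} = 1;  # no scheduled backups,
                              #                                 # forced -f/-i still allowed
    $Conf{FullPeriod} = -2;   #     $Conf{BackupsDisable} = 2;  # no backups at all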
 
@@ -239,17 +316,46 @@ if ( !$opts{i} && !$opts{f} && $Conf{BlackoutGoodCnt} >= 0
              && $StatusHost{aliveCnt} >= $Conf{BlackoutGoodCnt} ) {
     my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
     my($currHours) = $hour + $min / 60 + $sec / 3600;
-    if ( $Conf{BlackoutHourBegin} <= $currHours
-              && $currHours <= $Conf{BlackoutHourEnd}
-              && grep($_ == $wday, @{$Conf{BlackoutWeekDays}}) ) {
-#        print(LOG $bpc->timeStamp, "skipping because of blackout"
-#                    . " (alive $StatusHost{aliveCnt} times)\n");
-        NothingToDo($needLink);
+    my $blackout;
+
+    foreach my $p ( @{$Conf{BlackoutPeriods}} ) {
+        #
+        # Allow blackout to span midnight (specified by hourBegin
+        # being greater than hourEnd)
+        #
+        next if ( ref($p->{weekDays}) ne "ARRAY" 
+                    || !defined($p->{hourBegin})
+                    || !defined($p->{hourEnd})
+                );
+        my $matchWday = $wday;
+        if ( $p->{hourBegin} > $p->{hourEnd} ) {
+            $blackout = $p->{hourBegin} <= $currHours
+                          || $currHours <= $p->{hourEnd};
+            if ( $currHours <= $p->{hourEnd} ) {
+                #
+                # This is after midnight, so decrement the weekday for the
+                # weekday check (eg: Monday 11pm-1am means Monday 2300 to
+                # Tuesday 0100, not Monday 2300-2400 plus Monday 0000-0100).
+                #
+                $matchWday--;
+                $matchWday += 7 if ( $matchWday < 0 );
+            }
+        } else {
+            $blackout = $p->{hourBegin} <= $currHours
+                          && $currHours <= $p->{hourEnd};
+        }
+        if ( $blackout && grep($_ == $matchWday, @{$p->{weekDays}}) ) {
+#           print(LOG $bpc->timeStamp, "skipping because of blackout"
+#                      . " (alive $StatusHost{aliveCnt} times)\n");
+            print(STDERR "Skipping $client because of blackout\n")
+                            if ( $opts{v} );
+            NothingToDo($needLink);
+        }
     }
 }
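
The blackout check now iterates over $Conf{BlackoutPeriods} entries, each with hourBegin, hourEnd and weekDays keys; hourBegin greater than hourEnd marks a period that spans midnight. A sketch of such an entry (Monday 11pm through Tuesday 1am, matching the example in the comment above; weekDays uses localtime numbering, 0 = Sunday):

    $Conf{BlackoutPeriods} = [
        {
            hourBegin => 23,       # 11pm
            hourEnd   =>  1,       # 1am the following day
            weekDays  => [1],      # Monday
        },
    ];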
 
 if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) {
-    printf(LOG "%sskipping because of user requested delay (%.1f hours left)",
+    printf(LOG "%sskipping because of user requested delay (%.1f hours left)\n",
                 $bpc->timeStamp, ($StatusHost{backoffTime} - time) / 3600);
     NothingToDo($needLink);
 }
@@ -259,41 +365,151 @@ if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) {
 #
 BackupExpire($client);
 
+my(@lastIdxByLevel, $incrCntSinceFull);
+
 #
 # Read Backup information, and find times of the most recent full and
-# incremental backups
+# incremental backups.  Also figure out which backup we will use
+# as a starting point for an incremental.
 #
 @Backups = $bpc->BackupInfoRead($client);
 for ( my $i = 0 ; $i < @Backups ; $i++ ) {
     $needLink = 1 if ( $Backups[$i]{nFilesNew} eq ""
                         || -f "$Dir/NewFileList.$Backups[$i]{num}" );
-    $lastBkupNum = $Backups[$i]{num};
     if ( $Backups[$i]{type} eq "full" ) {
-       if ( $lastFull < $Backups[$i]{startTime} ) {
-           $lastFull = $Backups[$i]{startTime};
-           $lastFullBkupNum = $Backups[$i]{num};
+        $incrCntSinceFull = 0;
+        $lastBkupNum = $Backups[$i]{num};
+        $lastIdxByLevel[0] = $i;
+       if ( $lastFullTime < $Backups[$i]{startTime} ) {
+           $lastFullTime = $Backups[$i]{startTime};
        }
-    } else {
-        $lastIncr = $Backups[$i]{startTime}
-                if ( $lastIncr < $Backups[$i]{startTime} );
+    } elsif ( $Backups[$i]{type} eq "incr" ) {
+        $incrCntSinceFull++;
+        $lastBkupNum = $Backups[$i]{num};
+        $lastIdxByLevel[$Backups[$i]{level}] = $i;
+        $lastIncrTime = $Backups[$i]{startTime}
+                if ( $lastIncrTime < $Backups[$i]{startTime} );
+    } elsif ( $Backups[$i]{type} eq "partial" ) {
+        $partialIdx     = $i;
+        $lastPartial    = $Backups[$i]{startTime};
+        $partialNum     = $Backups[$i]{num};
+        $partialFileCnt = $Backups[$i]{nFiles};
     }
 }
 
 #
 # Decide whether we do nothing, or a full or incremental backup.
 #
-if ( @Backups == 0
+my $needs_full = (time - $lastFullTime > $Conf{FullPeriod} * 24 * 3600
+               && time - $lastIncrTime > $Conf{IncrPeriod} * 24 * 3600);
+my $needs_incr = (time - $lastIncrTime > $Conf{IncrPeriod} * 24 * 3600
+               && time - $lastFullTime > $Conf{IncrPeriod} * 24 * 3600);
+
+if ( $lastFullTime == 0
         || $opts{f}
-        || (!$opts{i} && (time - $lastFull > $Conf{FullPeriod} * 24*3600
-            && time - $lastIncr > $Conf{IncrPeriod} * 24*3600)) ) {
+        || (!$opts{i} && !$opts{I} && $needs_full)
+        || ( $opts{F} && $needs_incr) ) {
     $type = "full";
-} elsif ( $opts{i} || (time - $lastIncr > $Conf{IncrPeriod} * 24*3600
-        && time - $lastFull > $Conf{IncrPeriod} * 24*3600) ) {
+    $incrLevel = 0;
+    $incrBaseBkupNum = $lastBkupNum;
+} elsif ( $opts{i}
+        || $needs_incr
+        || ($opts{I} && $needs_full) ) {
     $type = "incr";
+    #
+    # For an incremental backup, figure out which level we should
+    # do and the index of the reference backup, which is the most
+    # recent backup at any lower level.
+    #
+    $Conf{IncrLevels} = [$Conf{IncrLevels}]
+                            unless ref($Conf{IncrLevels}) eq "ARRAY";
+    $Conf{IncrLevels} = [1] if ( !@{$Conf{IncrLevels}} );
+    $incrCntSinceFull = $incrCntSinceFull % @{$Conf{IncrLevels}};
+    $incrLevel = $Conf{IncrLevels}[$incrCntSinceFull];
+    for ( my $i = 0 ; $i < $incrLevel ; $i++ ) {
+        my $idx = $lastIdxByLevel[$i];
+        next if ( !defined($idx) );
+        if ( !defined($incrBaseTime)
+                || $Backups[$idx]{startTime} > $incrBaseTime ) {
+            $incrBaseBkupNum = $Backups[$idx]{num};
+            $incrBaseLevel   = $Backups[$idx]{level};
+            $incrBaseTime    = $Backups[$idx]{startTime};
+        }
+    }
+    #
+    # Can't find any earlier lower-level backup!  Shouldn't
+    # happen - just do full instead
+    #
+    if ( !defined($incrBaseBkupNum) || $incrLevel < 1 ) {
+        $type = "full";
+        $incrBaseBkupNum = $lastBkupNum;
+    }
 } else {
     NothingToDo($needLink);
 }
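
The level chosen for each incremental cycles through $Conf{IncrLevels}, and the reference backup is the most recent backup at any lower level, so higher-level incrementals only transfer files changed since the previous lower-level backup rather than since the last full. A sketch, assuming a simple three-level configuration:

    $Conf{IncrLevels} = [1, 2, 3];
    # After a full (level 0), successive incrementals run at levels 1, 2, 3,
    # then wrap back to 1.  The level-2 incremental is relative to the
    # level-1 backup, the level-3 to the level-2, and so on; the default
    # of [1] keeps the old behaviour where every incremental is relative
    # to the last full.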
 
+#
+# Create top-level directories if they don't exist
+#
+foreach my $dir ( (
+            "$Conf{TopDir}",
+            "$Conf{TopDir}/pool",
+            "$Conf{TopDir}/cpool",
+            "$Conf{TopDir}/pc",
+            "$Conf{TopDir}/trash",
+        ) ) {
+    next if ( -d $dir );
+    mkpath($dir, 0, 0750);
+    if ( !-d $dir ) {
+        print("Failed to create $dir\n");
+        printf(LOG "%sFailed to create directory %s\n", $bpc->timeStamp, $dir);
+        print("link $clientURI\n") if ( $needLink );
+        exit(1);
+    } else {
+        printf(LOG "%sCreated directory %s\n", $bpc->timeStamp, $dir);
+    }
+}
+
+if ( !$bpc->HardlinkTest($Dir, "$TopDir/cpool") ) {
+    print(LOG $bpc->timeStamp, "Can't create a test hardlink between a file"
+               . " in $Dir and $TopDir/cpool.  Either these are different"
+               . " file systems, or this file system doesn't support hardlinks,"
+               . " or these directories don't exist, or there is a permissions"
+               . " problem, or the file system is out of inodes or full.  Use"
+               . " df, df -i, and ls -ld to check each of these possibilities."
+               . " Quitting...\n");
+    print("test hardlink between $Dir and $TopDir/cpool failed\n");
+    print("link $clientURI\n") if ( $needLink );
+    exit(1);
+}
+
+if ( !$opts{d} ) {
+    #
+    # In the non-DHCP case, make sure the host can be looked up
+    # via NS, or otherwise find the IP address via NetBios.
+    #
+    if ( $Conf{ClientNameAlias} ne "" ) {
+        $host = $Conf{ClientNameAlias};
+    } else {
+        $host = $client;
+    }
+    if ( !defined(gethostbyname($host)) ) {
+        #
+        # Ok, NS doesn't know about it.  Maybe it is a NetBios name
+        # instead.
+        #
+       print(STDERR "Name server doesn't know about $host; trying NetBios\n")
+                       if ( $opts{v} );
+        if ( !defined($hostIP = $bpc->NetBiosHostIPFind($host)) ) {
+           print(LOG $bpc->timeStamp, "Can't find host $host via netbios\n");
+            print("host not found\n");
+            exit(1);
+        }
+    } else {
+        $hostIP = $host;
+    }
+}
+
 #
 # Check if $host is alive
 #
@@ -331,7 +547,11 @@ $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
 #
 # Setup file extension for compression and open XferLOG output file
 #
-$Conf{CompressLevel} = 0 if ( !BackupPC::FileZIO->compOk );
+if ( $Conf{CompressLevel} && !BackupPC::FileZIO->compOk ) {
+    print(LOG $bpc->timeStamp, "dump failed: can't find Compress::Zlib\n");
+    print("dump failed: can't find Compress::Zlib\n");
+    exit(1);
+}
 my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
 my $XferLOG = BackupPC::FileZIO->open("$Dir/XferLOG$fileExt", 1,
                                      $Conf{CompressLevel});
@@ -341,69 +561,109 @@ if ( !defined($XferLOG) ) {
     print("dump failed: unable to open/create $Dir/XferLOG$fileExt\n");
     exit(1);
 }
-unlink("$Dir/NewFileList");
-my $startTime = time();
 
+#
+# Ignore the partial dump in the case of an incremental
+# or when the partial is too old.  A partial is a partial full.
+#
+if ( $type ne "full" || time - $lastPartial > $Conf{PartialAgeMax} * 24*3600 ) {
+    $partialNum = undef;
+    $partialIdx = -1;
+}
+
+#
+# If we are resuming a prior partial, copy its old XferLOG file
+#
+if ( $partialNum ) {
+    my($compress, $fileName);
+    if ( -f "$Dir/XferLOG.$partialNum.z" ) {
+       $fileName = "$Dir/XferLOG.$partialNum.z";
+       $compress = 1;
+    } elsif ( -f "$Dir/XferLOG.$partialNum" ) {
+       $fileName = "$Dir/XferLOG.$partialNum";
+       $compress = 0;
+    }
+    if ( my $oldLOG = BackupPC::FileZIO->open($fileName, 0, $compress) ) {
+       my $data;
+       while ( $oldLOG->read(\$data, 65536) > 0 ) {
+           $XferLOG->write(\$data);
+       }
+       $oldLOG->close;
+    }
+}
+
+$XferLOG->writeTeeStderr(1) if ( $opts{v} );
+unlink("$Dir/NewFileList") if ( -f "$Dir/NewFileList" );
+
+my $startTime     = time();
 my $tarErrs       = 0;
 my $nFilesExist   = 0;
 my $sizeExist     = 0;
 my $sizeExistComp = 0;
 my $nFilesTotal   = 0;
 my $sizeTotal     = 0;
-my($logMsg, %stat, $xfer, $ShareNames);
+my($logMsg, %stat, $xfer, $ShareNames, $noFilesErr);
 my $newFilesFH;
 
-if ( $Conf{XferMethod} eq "tar" ) {
-    $ShareNames = $Conf{TarShareName};
-} elsif ( $Conf{XferMethod} eq "rsync" || $Conf{XferMethod} eq "rsyncd" ) {
-    $ShareNames = $Conf{RsyncShareName};
-} else {
-    $ShareNames = $Conf{SmbShareName};
-}
-
-$ShareNames = [ $ShareNames ] unless ref($ShareNames) eq "ARRAY";
+$ShareNames = BackupPC::Xfer::getShareNames(\%Conf);
 
 #
 # Run an optional pre-dump command
 #
 UserCommandRun("DumpPreUserCmd");
+if ( $? && $Conf{UserCmdCheckStatus} ) {
+    print(LOG $bpc->timeStamp,
+            "DumpPreUserCmd returned error status $?... exiting\n");
+    $XferLOG->write(\"DumpPreUserCmd returned error status $?... exiting\n");
+    $stat{hostError} = "DumpPreUserCmd returned error status $?";
+    BackupFailCleanup();
+}
 $NeedPostCmd = 1;
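
The new $Conf{UserCmdCheckStatus} checks apply the same rule to DumpPreUserCmd, DumpPreShareCmd, DumpPostShareCmd and DumpPostUserCmd: a non-zero exit status aborts or fails the backup. A one-line sketch of the setting:

    $Conf{UserCmdCheckStatus} = 1;   # treat a non-zero exit from the user
                                     # commands above as fatal for this dump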
 
 #
 # Now backup each of the shares
 #
+my $shareDuplicate = {};
 for my $shareName ( @$ShareNames ) {
     local(*RH, *WH);
 
+    #
+    # Convert $shareName to utf8 octets
+    #
+    $shareName = encode("utf8", $shareName);
     $stat{xferOK} = $stat{hostAbort} = undef;
     $stat{hostError} = $stat{lastOutputLine} = undef;
-    if ( -d "$Dir/new/$shareName" ) {
+    if ( $shareName eq "" ) {
+        print(LOG $bpc->timeStamp,
+                  "unexpected empty share name skipped\n");
+        next;
+    }
+    if ( $shareDuplicate->{$shareName} ) {
         print(LOG $bpc->timeStamp,
                   "unexpected repeated share name $shareName skipped\n");
         next;
     }
+    $shareDuplicate->{$shareName} = 1;
 
-    if ( $Conf{XferMethod} eq "tar" ) {
-        #
-        # Use tar (eg: tar/ssh) as the transport program.
-        #
-        $xfer = BackupPC::Xfer::Tar->new($bpc);
-    } elsif ( $Conf{XferMethod} eq "rsync" || $Conf{XferMethod} eq "rsyncd" ) {
-        #
-        # Use rsync as the transport program.
-        #
-        if ( !defined($xfer = BackupPC::Xfer::Rsync->new($bpc)) ) {
-            my $errStr = BackupPC::Xfer::Rsync::errStr;
-            print(LOG $bpc->timeStamp, "dump failed: $errStr\n");
-            print("dump failed: $errStr\n");
-            UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-            exit(1);
-        }
-    } else {
-        #
-        # Default is to use smbclient (smb) as the transport program.
-        #
-        $xfer = BackupPC::Xfer::Smb->new($bpc);
+    UserCommandRun("DumpPreShareCmd", $shareName);
+    if ( $? && $Conf{UserCmdCheckStatus} ) {
+        print(LOG $bpc->timeStamp,
+                "DumpPreShareCmd returned error status $?... exiting\n");
+        UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
+        $XferLOG->write(\"DumpPreShareCmd returned error status $?... exiting\n");
+        $stat{hostError} = "DumpPreShareCmd returned error status $?";
+        BackupFailCleanup();
+    }
+
+    $xfer = BackupPC::Xfer::create($Conf{XferMethod}, $bpc);
+    if ( !defined($xfer) ) {
+        my $errStr = BackupPC::Xfer::errStr();
+        print(LOG $bpc->timeStamp, "dump failed: $errStr\n");
+        UserCommandRun("DumpPostShareCmd", $shareName) if ( $NeedPostCmd );
+        UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
+        $XferLOG->write(\"BackupPC::Xfer::create failed: $errStr\n");
+        $stat{hostError} = $errStr;
+        BackupFailCleanup();
     }
 
     my $useTar = $xfer->useTar;
@@ -413,12 +673,22 @@ for my $shareName ( @$ShareNames ) {
        # This xfer method outputs a tar format file, so we start a
        # BackupPC_tarExtract to extract the data.
        #
-       # Create a pipe to connect the Xfer method to BackupPC_tarExtract
+       # Create a socketpair to connect the Xfer method to BackupPC_tarExtract
        # WH is the write handle for writing, provided to the transport
-       # program, and RH is the other end of the pipe for reading,
+       # program, and RH is the other end of the socket for reading,
        # provided to BackupPC_tarExtract.
        #
-       pipe(RH, WH);
+        if ( socketpair(RH, WH, AF_UNIX, SOCK_STREAM, PF_UNSPEC) ) {
+           shutdown(RH, 1);    # no writing to this socket
+           shutdown(WH, 0);    # no reading from this socket
+           setsockopt(RH, SOL_SOCKET, SO_RCVBUF, 8 * 65536);
+           setsockopt(WH, SOL_SOCKET, SO_SNDBUF, 8 * 65536);
+       } else {
+           #
+           # Default to pipe() if socketpair() doesn't work.
+           #
+           pipe(RH, WH);
+       }
 
        #
        # fork a child for BackupPC_tarExtract.  TAR is a file handle
@@ -432,6 +702,7 @@ for my $shareName ( @$ShareNames ) {
            close(WH);
            last;
        }
+       binmode(TAR);
        if ( !$tarPid ) {
            #
            # This is the tar child.  Close the write end of the pipe,
@@ -444,6 +715,7 @@ for my $shareName ( @$ShareNames ) {
            open(STDERR, ">&STDOUT");
            close(STDIN);
            open(STDIN, "<&RH");
+           alarm(0);
            exec("$BinDir/BackupPC_tarExtract", $client, $shareName,
                         $Conf{CompressLevel});
            print(LOG $bpc->timeStamp,
@@ -458,47 +730,59 @@ for my $shareName ( @$ShareNames ) {
        open(NEW_FILES, ">", "$TopDir/pc/$client/NewFileList")
                     || die("can't open $TopDir/pc/$client/NewFileList");
        $newFilesFH = *NEW_FILES;
+       binmode(NEW_FILES);
     }
 
     #
     # Run the transport program
     #
     $xfer->args({
-        host        => $host,
-        client      => $client,
-        hostIP      => $hostIP,
-        shareName   => $shareName,
-        pipeRH      => *RH,
-        pipeWH      => *WH,
-        XferLOG     => $XferLOG,
-       newFilesFH  => $newFilesFH,
-        outDir      => $Dir,
-        type        => $type,
-        lastFull    => $lastFull,
-       lastBkupNum => $lastBkupNum,
-       lastFullBkupNum => $lastFullBkupNum,
-       backups     => \@Backups,
-       compress    => $Conf{CompressLevel},
-       XferMethod  => $Conf{XferMethod},
+        host         => $host,
+        client       => $client,
+        hostIP       => $hostIP,
+        shareName    => $shareName,
+        pipeRH       => *RH,
+        pipeWH       => *WH,
+        XferLOG      => $XferLOG,
+       newFilesFH   => $newFilesFH,
+        outDir       => $Dir,
+        type         => $type,
+        incrBaseTime => $incrBaseTime,
+        incrBaseBkupNum => $incrBaseBkupNum,
+       backups      => \@Backups,
+       compress     => $Conf{CompressLevel},
+       XferMethod   => $Conf{XferMethod},
+       logLevel     => $Conf{XferLogLevel},
+        partialNum   => $partialNum,
+       pidHandler   => \&pidHandler,
+       completionPercent => \&completionPercent,
     });
 
     if ( !defined($logMsg = $xfer->start()) ) {
-        print(LOG $bpc->timeStamp, "xfer start failed: ", $xfer->errStr, "\n");
-        print("dump failed: ", $xfer->errStr, "\n");
-        print("link $clientURI\n") if ( $needLink );
+        my $errStr = "xfer start failed: " . $xfer->errStr . "\n";
+        print(LOG $bpc->timeStamp, $errStr);
         #
         # kill off the tar process, first nicely then forcefully
         #
        if ( $tarPid > 0 ) {
-           kill(2, $tarPid);
+           kill($bpc->sigName2num("INT"), $tarPid);
+           sleep(1);
+           kill($bpc->sigName2num("KILL"), $tarPid);
+       }
+       if ( @xferPid ) {
+           kill($bpc->sigName2num("INT"), @xferPid);
            sleep(1);
-           kill(9, $tarPid);
+           kill($bpc->sigName2num("KILL"), @xferPid);
        }
+       UserCommandRun("DumpPostShareCmd", $shareName) if ( $NeedPostCmd );
        UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-        exit(1);
+        $XferLOG->write(\"xfer start failed: $errStr\n");
+        $stat{hostError} = $errStr;
+        BackupFailCleanup();
     }
 
-    $xferPid = $xfer->xferPid;
+    @xferPid = $xfer->xferPid;
+
     if ( $useTar ) {
        #
        # The parent must close both handles on the pipe since the children
@@ -506,53 +790,55 @@ for my $shareName ( @$ShareNames ) {
        #
        close(RH);
        close(WH);
-       print(LOG $bpc->timeStamp, $logMsg,
-                                  " (xferPid=$xferPid, tarPid=$tarPid)\n");
-    } elsif ( $xferPid > 0 ) {
-       print(LOG $bpc->timeStamp, $logMsg, " (xferPid=$xferPid)\n");
-    } else {
-       print(LOG $bpc->timeStamp, $logMsg, "\n");
     }
-    print("started $type dump, pid=$xferPid, tarPid=$tarPid,"
-            . " share=$shareName\n");
+    print(LOG $bpc->timeStamp, $logMsg, "\n");
+    $XferLOG->write(\"$logMsg\n");
+    print("started $type dump, share=$shareName\n");
 
-    if ( $useTar || $xferPid > 0 ) {
+    pidHandler(@xferPid);
+
+    if ( $useTar ) {
        #
        # Parse the output of the transfer program and BackupPC_tarExtract
        # while they run.  Since we might be reading from two or more children
        # we use a select.
        #
        my($FDread, $tarOut, $mesg);
-       vec($FDread, fileno(TAR), 1) = 1 if ( $useTar );
+       vec($FDread, fileno(TAR), 1) = 1;
        $xfer->setSelectMask(\$FDread);
 
        SCAN: while ( 1 ) {
            my $ein = $FDread;
            last if ( $FDread =~ /^\0*$/ );
            select(my $rout = $FDread, undef, $ein, undef);
-           if ( $useTar ) {
-               if ( vec($rout, fileno(TAR), 1) ) {
-                   if ( sysread(TAR, $mesg, 8192) <= 0 ) {
-                       vec($FDread, fileno(TAR), 1) = 0;
-                       close(TAR);
-                   } else {
-                       $tarOut .= $mesg;
-                   }
-               }
-               while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
-                   $_ = $1;
-                   $tarOut = $2;
-                   $XferLOG->write(\"tarExtract: $_\n");
-                   if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) {
-                       $tarErrs       += $1;
-                       $nFilesExist   += $2;
-                       $sizeExist     += $3;
-                       $sizeExistComp += $4;
-                       $nFilesTotal   += $5;
-                       $sizeTotal     += $6;
-                   }
-               }
-           }
+            if ( vec($rout, fileno(TAR), 1) ) {
+                if ( sysread(TAR, $mesg, 8192) <= 0 ) {
+                    vec($FDread, fileno(TAR), 1) = 0;
+                    close(TAR);
+                } else {
+                    $tarOut .= $mesg;
+                }
+            }
+            while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
+                $_ = $1;
+                $tarOut = $2;
+                if ( /^  / ) {
+                    $XferLOG->write(\"$_\n");
+                } else {
+                    $XferLOG->write(\"tarExtract: $_\n");
+                }
+                if ( /^BackupPC_tarExtact aborting \((.*)\)/ ) {
+                    $stat{hostError} = $1;
+                }
+                if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) {
+                    $tarErrs       += $1;
+                    $nFilesExist   += $2;
+                    $sizeExist     += $3;
+                    $sizeExistComp += $4;
+                    $nFilesTotal   += $5;
+                    $sizeTotal     += $6;
+                }
+            }
            last if ( !$xfer->readOutput(\$FDread, $rout) );
            while ( my $str = $xfer->logMsgGet ) {
                print(LOG $bpc->timeStamp, "xfer: $str\n");
@@ -565,7 +851,7 @@ for my $shareName ( @$ShareNames ) {
                # the transfer.
                #
                if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) {
-                   $stat{hostError} = $errMsg;
+                   $stat{hostError} = $errMsg if ( $stat{hostError} eq "" );
                    last SCAN;
                }
            }
@@ -587,6 +873,9 @@ for my $shareName ( @$ShareNames ) {
     # Merge the xfer status (need to accumulate counts)
     #
     my $newStat = $xfer->getStats;
+    if ( $newStat->{fileCnt} == 0 ) {
+       $noFilesErr ||= "No files dumped for share $shareName";
+    }
     foreach my $k ( (keys(%stat), keys(%$newStat)) ) {
         next if ( !defined($newStat->{$k}) );
         if ( $k =~ /Cnt$/ ) {
@@ -600,31 +889,64 @@ for my $shareName ( @$ShareNames ) {
             next;
         }
     }
+
+    if ( $NeedPostCmd ) {
+        UserCommandRun("DumpPostShareCmd", $shareName);
+        if ( $? && $Conf{UserCmdCheckStatus} ) {
+            print(LOG $bpc->timeStamp,
+                    "DumpPostShareCmd returned error status $?... exiting\n");
+            $stat{hostError} = "DumpPostShareCmd returned error status $?";
+        }
+    }
+
     $stat{xferOK} = 0 if ( $stat{hostError} || $stat{hostAbort} );
     if ( !$stat{xferOK} ) {
         #
-        # kill off the tranfer program, first nicely then forcefully
+        # kill off the transfer program, first nicely then forcefully
         #
-       if ( $xferPid > 0 ) {
-           kill(2, $xferPid);
+       if ( @xferPid ) {
+           kill($bpc->sigName2num("INT"), @xferPid);
            sleep(1);
-           kill(9, $xferPid);
+           kill($bpc->sigName2num("KILL"), @xferPid);
        }
         #
         # kill off the tar process, first nicely then forcefully
         #
        if ( $tarPid > 0 ) {
-           kill(2, $tarPid);
+           kill($bpc->sigName2num("INT"), $tarPid);
            sleep(1);
-           kill(9, $tarPid);
+           kill($bpc->sigName2num("KILL"), $tarPid);
        }
         #
         # don't do any more shares on this host
         #
         last;
     }
+    #
+    # Wait for any child processes to exit
+    #
+    # 1 while ( wait() >= 0 );
+}
+
+#
+# If this is a full and any share had zero files, then consider the dump bad
+#
+if ( $type eq "full" && $stat{hostError} eq ""
+           && length($noFilesErr) && $Conf{BackupZeroFilesIsFatal} ) {
+    $stat{hostError} = $noFilesErr;
+    $stat{xferOK} = 0;
+}
+
+$stat{xferOK} = 0 if ( $Abort );
+
+#
+# If there is no "new" directory then the backup is bad
+#
+if ( $stat{xferOK} && !-d "$Dir/new" ) {
+    $stat{hostError} = "No backup directory $Dir/new"
+                            if ( $stat{hostError} eq "" );
+    $stat{xferOK} = 0;
 }
-my $lastNum  = -1;
 
 #
 # Do one last check to make sure it is still the machine we expect.
@@ -635,122 +957,68 @@ if ( $stat{xferOK} && (my $errMsg = CorrectHostCheck($hostIP, $host)) ) {
 }
 
 UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-$XferLOG->close();
+if ( $? && $Conf{UserCmdCheckStatus} ) {
+    print(LOG $bpc->timeStamp,
+            "DumpPostUserCmd returned error status $?... exiting\n");
+    $stat{hostError} = "DumpPostUserCmd returned error status $?";
+    $stat{xferOK} = 0;
+}
 close($newFilesFH) if ( defined($newFilesFH) );
 
-if ( $stat{xferOK} ) {
-    @Backups = $bpc->BackupInfoRead($client);
-    for ( my $i = 0 ; $i < @Backups ; $i++ ) {
-        $lastNum = $Backups[$i]{num} if ( $lastNum < $Backups[$i]{num} );
-    }
-    $lastNum++;
-    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$lastNum")
-                                if ( -d "$Dir/$lastNum" );
-    if ( !rename("$Dir/new", "$Dir/$lastNum") ) {
-        print(LOG $bpc->timeStamp,
-                  "Rename $Dir/new -> $Dir/$lastNum failed\n");
-        $stat{xferOK} = 0;
-    }
-    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$lastNum$fileExt");
-    rename("$Dir/NewFileList", "$Dir/NewFileList.$lastNum");
-}
 my $endTime = time();
 
 #
 # If the dump failed, clean up
 #
 if ( !$stat{xferOK} ) {
-    #
-    # wait a short while and see if the system is still alive
-    #
     $stat{hostError} = $stat{lastOutputLine} if ( $stat{hostError} eq "" );
     if ( $stat{hostError} ) {
         print(LOG $bpc->timeStamp,
                   "Got fatal error during xfer ($stat{hostError})\n");
+       $XferLOG->write(\"Got fatal error during xfer ($stat{hostError})\n");
     }
-    sleep(10);
-    if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
-        $stat{hostAbort} = 1;
-    }
-    if ( $stat{hostAbort} ) {
-        $stat{hostError} = "lost network connection during backup";
-    }
-    print(LOG $bpc->timeStamp, "Dump aborted ($stat{hostError})\n");
-    unlink("$Dir/timeStamp.level0");
-    unlink("$Dir/SmbLOG.bad");
-    unlink("$Dir/SmbLOG.bad$fileExt");
-    unlink("$Dir/XferLOG.bad");
-    unlink("$Dir/XferLOG.bad$fileExt");
-    unlink("$Dir/NewFileList");
-    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
-    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
-    print("dump failed: $stat{hostError}\n");
-    print("link $clientURI\n") if ( $needLink );
-    exit(1);
-}
-
-#
-# Add the new backup information to the backup file
-#
-@Backups = $bpc->BackupInfoRead($client);
-my $i = @Backups;
-$Backups[$i]{num}           = $lastNum;
-$Backups[$i]{type}          = $type;
-$Backups[$i]{startTime}     = $startTime;
-$Backups[$i]{endTime}       = $endTime;
-$Backups[$i]{size}          = $sizeTotal;
-$Backups[$i]{nFiles}        = $nFilesTotal;
-$Backups[$i]{xferErrs}      = $stat{xferErrCnt} || 0;
-$Backups[$i]{xferBadFile}   = $stat{xferBadFileCnt} || 0;
-$Backups[$i]{xferBadShare}  = $stat{xferBadShareCnt} || 0;
-$Backups[$i]{nFilesExist}   = $nFilesExist;
-$Backups[$i]{sizeExist}     = $sizeExist;
-$Backups[$i]{sizeExistComp} = $sizeExistComp;
-$Backups[$i]{tarErrs}       = $tarErrs;
-$Backups[$i]{compress}      = $Conf{CompressLevel};
-$Backups[$i]{noFill}        = $type eq "full" ? 0 : 1;
-$Backups[$i]{mangle}        = 1;        # name mangling always on for v1.04+
-$bpc->BackupInfoWrite($client, @Backups);
-
-unlink("$Dir/timeStamp.level0");
-
-#
-# Now remove the bad files, replacing them if possible with links to
-# earlier backups.
-#
-foreach my $file ( $xfer->getBadFiles ) {
-    my $j;
-    unlink("$Dir/$lastNum/$file");
-    for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
-        next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
-        if ( !link("$Dir/$Backups[$j]{num}/$file", "$Dir/$lastNum/$file") ) {
-            print(LOG $bpc->timeStamp,
-                      "Unable to link $lastNum/$file to"
-                    . " $Backups[$j]{num}/$file\n");
-        } else {
-            print(LOG $bpc->timeStamp,
-                      "Bad file $lastNum/$file replaced by link to"
-                    . " $Backups[$j]{num}/$file\n");
-        }
-        last;
-    }
-    if ( $j < 0 ) {
-        print(LOG $bpc->timeStamp,
-                  "Removed bad file $lastNum/$file (no older"
-                . " copy to link to)\n");
+    if ( !$Abort ) {
+       #
+       # wait a short while and see if the system is still alive
+       #
+       sleep(5);
+       if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+           $stat{hostAbort} = 1;
+       }
+       if ( $stat{hostAbort} ) {
+           $stat{hostError} = "lost network connection during backup";
+       }
+       print(LOG $bpc->timeStamp, "Backup aborted ($stat{hostError})\n");
+       $XferLOG->write(\"Backup aborted ($stat{hostError})\n");
+    } else {
+       $XferLOG->write(\"Backup aborted by user signal\n");
     }
+
+    #
+    # Close the log file and call BackupFailCleanup, which exits.
+    #
+    BackupFailCleanup();
 }
 
+my $newNum = BackupSave();
+
 my $otherCount = $stat{xferErrCnt} - $stat{xferBadFileCnt}
                                    - $stat{xferBadShareCnt};
+$stat{fileCnt}         ||= 0;
+$stat{byteCnt}         ||= 0;
+$stat{xferErrCnt}      ||= 0;
+$stat{xferBadFileCnt}  ||= 0;
+$stat{xferBadShareCnt} ||= 0;
 print(LOG $bpc->timeStamp,
-          "$type backup $lastNum complete, $stat{fileCnt} files,"
+          "$type backup $newNum complete, $stat{fileCnt} files,"
         . " $stat{byteCnt} bytes,"
         . " $stat{xferErrCnt} xferErrs ($stat{xferBadFileCnt} bad files,"
         . " $stat{xferBadShareCnt} bad shares, $otherCount other)\n");
 
 BackupExpire($client);
 
+UserCommandRun("DumpPostFinishCmd"); # XXX ASA search integration
+
 print("$type backup complete\n");
 
 ###########################################################################
@@ -768,38 +1036,192 @@ sub NothingToDo
 
 sub catch_signal
 {
-    my $signame = shift;
-    my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+    my $sigName = shift;
+
+    #
+    # The first time we receive a signal we try to gracefully
+    # abort the backup.  This allows us to keep a partial dump
+    # with the in-progress file deleted and attribute caches
+    # flushed to disk etc.
+    #
+    if ( !length($SigName) ) {
+       my $reason;
+       if ( $sigName eq "INT" ) {
+           $reason = "aborted by user (signal=$sigName)";
+       } else {
+           $reason = "aborted by signal=$sigName";
+       }
+       if ( $Pid == $$ ) {
+           #
+           # Parent logs a message
+           #
+           print(LOG $bpc->timeStamp,
+                   "Aborting backup after signal $sigName\n");
+
+           #
+           # Tell xfer to abort, but only if we actually started one
+           #
+           $xfer->abort($reason) if ( defined($xfer) );
+
+           #
+           # Send ALRMs to BackupPC_tarExtract if we are using it
+           #
+           if ( $tarPid > 0 ) {
+               kill($bpc->sigName2num("ALRM"), $tarPid);
+           }
+
+           #
+           # Schedule a 20 second timer in case the clean
+           # abort doesn't complete
+           #
+           alarm(20);
+       } else {
+           #
+           # Children ignore anything other than ALRM and INT
+           #
+           if ( $sigName ne "ALRM" && $sigName ne "INT" ) {
+               return;
+           }
+
+           #
+           # The child also tells xfer to abort
+           #
+           $xfer->abort($reason);
+
+           #
+           # Schedule a 15 second timer in case the clean
+           # abort doesn't complete
+           #
+           alarm(15);
+       }
+       $SigName = $sigName;
+       $Abort = 1;
+       return;
+    }
 
-    print(LOG $bpc->timeStamp, "cleaning up after signal $signame\n");
+    #
+    # This is a second signal: time to clean up.
+    #
+    if ( $Pid != $$ && ($sigName eq "ALRM" || $sigName eq "INT") ) {
+       #
+       # Children quit quietly on ALRM or INT
+       #
+       exit(1)
+    }
+
+    #
+    # Ignore other signals in children
+    #
+    return if ( $Pid != $$ );
+
+    $SIG{$sigName} = 'IGNORE';
     UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
-    $XferLOG->write(\"exiting after signal $signame\n");
-    $XferLOG->close();
-    if ( $xferPid > 0 ) {
-        if ( kill(2, $xferPid) <= 0 ) {
-            sleep(1);
-            kill(9, $xferPid);
-        }
+    $XferLOG->write(\"exiting after signal $sigName\n");
+    if ( @xferPid ) {
+        kill($bpc->sigName2num("INT"), @xferPid);
+       sleep(1);
+       kill($bpc->sigName2num("KILL"), @xferPid);
     }
     if ( $tarPid > 0 ) {
-        if ( kill(2, $tarPid) <= 0 ) {
-            sleep(1);
-            kill(9, $tarPid);
-        }
+        kill($bpc->sigName2num("INT"), $tarPid);
+       sleep(1);
+       kill($bpc->sigName2num("KILL"), $tarPid);
     }
-    unlink("$Dir/timeStamp.level0");
-    unlink("$Dir/NewFileList");
-    unlink("$Dir/XferLOG.bad");
-    unlink("$Dir/XferLOG.bad$fileExt");
-    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
-    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
-    if ( $signame eq "INT" ) {
-        print("dump failed: aborted by user (signal=$signame)\n");
+    if ( $sigName eq "INT" ) {
+        $stat{hostError} = "aborted by user (signal=$sigName)";
     } else {
-        print("dump failed: received signal=$signame\n");
+        $stat{hostError} = "received signal=$sigName";
     }
+    BackupFailCleanup();
+}
+
+sub CheckForNewFiles
+{
+    if ( -f _ && $File::Find::name !~ /\/fattrib$/ ) {
+        $nFilesTotal++;
+    } elsif ( -d _ ) {
+       #
+       # No need to check entire tree
+       #
+        $File::Find::prune = 1 if ( $nFilesTotal );
+    }
+}
+
+sub BackupFailCleanup
+{
+    my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+    my $keepPartial = 0;
+
+    #
+    # We keep this backup if it is a full and we actually backed
+    # up some files.  If the prior backup was a partial too, we
+    # only keep this backup if it has more files than the previous
+    # partial.
+    #
+    if ( $type eq "full" ) {
+       if ( $nFilesTotal == 0 && $xfer->getStats->{fileCnt} == 0 ) {
+           #
+           # Xfer didn't report any files, but check in the new
+           # directory just in case.
+           #
+           find(\&CheckForNewFiles, "$Dir/new");
+        }
+        my $str;
+        if ( $nFilesTotal > $partialFileCnt
+                || $xfer->getStats->{fileCnt} > $partialFileCnt ) {
+            #
+            # If the last backup wasn't a partial then
+            # $partialFileCnt is undefined, so the above
+            # test is simply $nFilesTotal > 0
+            #
+           $keepPartial = 1;
+            if ( $partialFileCnt ) {
+                $str = sprintf("Saving this as a partial backup, replacing the"
+                         . " prior one (got %d and %d files versus %d)\n",
+                         $nFilesTotal, $xfer->getStats->{fileCnt}, $partialFileCnt);
+            } else {
+                $str = "Saving this as a partial backup\n";
+            }
+       } else {
+            $str = sprintf("Not saving this as a partial backup since it has fewer"
+                     . " files than the prior one (got %d and %d files versus %d)\n",
+                     $nFilesTotal, $xfer->getStats->{fileCnt}, $partialFileCnt);
+        }
+        $XferLOG->write(\$str);
+    }
+
+    #
+    # Don't keep partials if they are disabled
+    #
+    $keepPartial = 0 if ( $Conf{PartialAgeMax} < 0 );
+
+    if ( !$keepPartial ) {
+        #
+        # No point in saving this dump; get rid of everything.
+        #
+        $XferLOG->close();
+        unlink("$Dir/timeStamp.level0")    if ( -f "$Dir/timeStamp.level0" );
+        unlink("$Dir/SmbLOG.bad")          if ( -f "$Dir/SmbLOG.bad" );
+        unlink("$Dir/SmbLOG.bad$fileExt")  if ( -f "$Dir/SmbLOG.bad$fileExt" );
+        unlink("$Dir/XferLOG.bad")         if ( -f "$Dir/XferLOG.bad" );
+        unlink("$Dir/XferLOG.bad$fileExt") if ( -f "$Dir/XferLOG.bad$fileExt" );
+        unlink("$Dir/NewFileList")         if ( -f "$Dir/NewFileList" );
+        rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
+        $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
+        print("dump failed: $stat{hostError}\n");
+        $XferLOG->close();
+        print("link $clientURI\n") if ( $needLink );
+        exit(1);
+    }
+    #
+    # Ok, now we should save this as a partial dump
+    #
+    $type = "partial";
+    my $newNum = BackupSave();
+    print("dump failed: $stat{hostError}\n");
     print("link $clientURI\n") if ( $needLink );
-    exit(1);
+    print(LOG $bpc->timeStamp, "Saved partial dump $newNum\n");
+    exit(2);
 }
 
 #
@@ -811,16 +1233,25 @@ sub BackupExpire
     my($client) = @_;
     my($Dir) = "$TopDir/pc/$client";
     my(@Backups) = $bpc->BackupInfoRead($client);
-    my($cntFull, $cntIncr, $firstFull, $firstIncr, $oldestIncr, $oldestFull);
+    my($cntFull, $cntIncr, $firstFull, $firstIncr, $oldestIncr,
+       $oldestFull, $changes);
 
+    if ( $Conf{FullKeepCnt} <= 0 ) {
+        print(LOG $bpc->timeStamp,
+                  "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n");
+       print(STDERR
+            "Invalid value for \$Conf{FullKeepCnt}=$Conf{FullKeepCnt}\n")
+                           if ( $opts{v} );
+        return;
+    }
     while ( 1 ) {
        $cntFull = $cntIncr = 0;
        $oldestIncr = $oldestFull = 0;
-       for ( $i = 0 ; $i < @Backups ; $i++ ) {
+       for ( my $i = 0 ; $i < @Backups ; $i++ ) {
            if ( $Backups[$i]{type} eq "full" ) {
                $firstFull = $i if ( $cntFull == 0 );
                $cntFull++;
-           } else {
+           } elsif ( $Backups[$i]{type} eq "incr" ) {
                $firstIncr = $i if ( $cntIncr == 0 );
                $cntIncr++;
            }
@@ -829,35 +1260,66 @@ sub BackupExpire
                         if ( $cntIncr > 0 );
        $oldestFull = (time - $Backups[$firstFull]{startTime}) / (24 * 3600)
                         if ( $cntFull > 0 );
-       if ( $cntIncr > $Conf{IncrKeepCnt}
-               || ($cntIncr > $Conf{IncrKeepCntMin}
-                   && $oldestIncr > $Conf{IncrAgeMax})
-               && (@Backups <= $firstIncr + 1
-                        || $Backups[$firstIncr]{noFill}
-                        || !$Backups[$firstIncr + 1]{noFill}) ) {
+
+        #
+        # With multi-level incrementals, several of the following
+        # incrementals might depend upon this one, so we have to
+        # delete all of them.  Figure out if that is possible
+        # by counting the number of consecutive incrementals that
+        # are unfilled and have a level higher than this one.
+        #
+        my $cntIncrDel = 1;
+        my $earliestIncr = $oldestIncr;
+
+       for ( my $i = $firstIncr + 1 ; $i < @Backups ; $i++ ) {
+            last if ( $Backups[$i]{level} <= $Backups[$firstIncr]{level}
+                   || !$Backups[$i]{noFill} );
+            $cntIncrDel++;
+            $earliestIncr = (time - $Backups[$i]{startTime}) / (24 * 3600);
+        }
+
+       if ( $cntIncr >= $Conf{IncrKeepCnt} + $cntIncrDel
+               || ($cntIncr >= $Conf{IncrKeepCntMin} + $cntIncrDel
+                   && $earliestIncr > $Conf{IncrAgeMax}) ) {
             #
-            # Only delete an incr backup if the Conf settings are satisfied.
-            # We also must make sure that either this backup is the most
-            # recent one, or it is not filled, or the next backup is filled.
-            # (We can't deleted a filled incr if the next backup is not
-            # filled.)
+            # Only delete an incr backup if the Conf settings are satisfied
+            # for all $cntIncrDel incrementals.  Since BackupRemove() does
+            # a splice() we need to do the deletes in the reverse order.
             # 
-           print(LOG $bpc->timeStamp,
-                      "removing incr backup $Backups[$firstIncr]{num}\n");
-           $bpc->RmTreeDefer("$TopDir/trash",
-                              "$Dir/$Backups[$firstIncr]{num}");
-           unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}")
-                        if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}" );
-           unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}.z")
-                        if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}.z" );
-           unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}")
-                        if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}" );
-           unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}.z")
-                        if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}.z" );
-           splice(@Backups, $firstIncr, 1);
-       } elsif ( ($cntFull > $Conf{FullKeepCnt}
-                    || ($cntFull > $Conf{FullKeepCntMin}
-                        && $oldestFull > $Conf{FullAgeMax}))
+            for ( my $i = $firstIncr + $cntIncrDel - 1 ;
+                    $i >= $firstIncr ; $i-- ) {
+                print(LOG $bpc->timeStamp,
+                          "removing incr backup $Backups[$i]{num}\n");
+                BackupRemove($client, \@Backups, $i);
+                $changes++;
+            }
+            next;
+        }
+
+        #
+        # Delete any old full backups, according to $Conf{FullKeepCntMin}
+       # and $Conf{FullAgeMax}.
+        #
+	# First make sure that $Conf{FullAgeMax} is at least as large as
+	# $Conf{FullPeriod} * $Conf{FullKeepCnt}, including
+       # the exponential array case.
+        #
+       my $fullKeepCnt = $Conf{FullKeepCnt};
+       $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+       my $fullAgeMax;
+       my $fullPeriod = int(0.5 + $Conf{FullPeriod});
+        $fullPeriod = 7 if ( $fullPeriod <= 0 );
+       for ( my $i = 0 ; $i < @$fullKeepCnt ; $i++ ) {
+           $fullAgeMax += $fullKeepCnt->[$i] * $fullPeriod;
+           $fullPeriod *= 2;
+       }
+       $fullAgeMax += $fullPeriod;     # add some buffer
+
+        if ( $cntFull > $Conf{FullKeepCntMin}
+               && $oldestFull > $Conf{FullAgeMax}
+               && $oldestFull > $fullAgeMax
+              && $Conf{FullKeepCntMin} > 0
+              && $Conf{FullAgeMax} > 0
                && (@Backups <= $firstFull + 1
                         || !$Backups[$firstFull + 1]{noFill}) ) {
             #
@@ -868,59 +1330,350 @@ sub BackupExpire
             # filled.)
             # 
            print(LOG $bpc->timeStamp,
-                   "removing full backup $Backups[$firstFull]{num}\n");
-           $bpc->RmTreeDefer("$TopDir/trash",
-                              "$Dir/$Backups[$firstFull]{num}");
-           unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}")
-                        if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}" );
-           unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}.z")
-                        if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}.z" );
-           unlink("$Dir/XferLOG.$Backups[$firstFull]{num}")
-                        if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}" );
-           unlink("$Dir/XferLOG.$Backups[$firstFull]{num}.z")
-                        if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}.z" );
-           splice(@Backups, $firstFull, 1);
-       } else {
+                   "removing old full backup $Backups[$firstFull]{num}\n");
+            BackupRemove($client, \@Backups, $firstFull);
+            $changes++;
+            next;
+        }
+
+        #
+        # Do new-style full backup expiry, which includes the case
+       # where $Conf{FullKeepCnt} is an array.
+        #
+        last if ( !BackupFullExpire($client, \@Backups) );
+        $changes++;
+    }
+    $bpc->BackupInfoWrite($client, @Backups) if ( $changes );
+}
+
+#
+# Handle full backup expiry, using exponential periods.
+#
+sub BackupFullExpire
+{
+    my($client, $Backups) = @_;
+    my $fullCnt = 0;
+    my $fullPeriod = $Conf{FullPeriod};
+    my $origFullPeriod = $fullPeriod;
+    my $fullKeepCnt = $Conf{FullKeepCnt};
+    my $fullKeepIdx = 0;
+    my(@delete, @fullList);
+
+    #
+    # Don't delete anything if $Conf{FullPeriod} or $Conf{FullKeepCnt} are
+    # not defined - possibly a corrupted config.pl file.
+    #
+    return if ( !defined($Conf{FullPeriod}) || !defined($Conf{FullKeepCnt}) );
+
+    #
+    # If regular backups are still disabled with $Conf{FullPeriod} < 0,
+    # we still expire backups based on a typical FullPeriod value - weekly.
+    #
+    $fullPeriod = 7 if ( $fullPeriod <= 0 );
+
+    $fullKeepCnt = [$fullKeepCnt] if ( ref($fullKeepCnt) ne "ARRAY" );
+
+    for ( my $i = 0 ; $i < @$Backups ; $i++ ) {
+        next if ( $Backups->[$i]{type} ne "full" );
+        push(@fullList, $i);
+    }
+    for ( my $k = @fullList - 1 ; $k >= 0 ; $k-- ) {
+        my $i = $fullList[$k];
+        my $prevFull = $fullList[$k-1] if ( $k > 0 );
+        #
+        # Don't delete any full that is followed by an unfilled backup,
+        # since it is needed for restore.
+        #
+        my $noDelete = $i + 1 < @$Backups ? $Backups->[$i+1]{noFill} : 0;
+
+        if ( !$noDelete && 
+              ($fullKeepIdx >= @$fullKeepCnt
+              || $k > 0
+                 && $fullKeepIdx > 0
+                 && $Backups->[$i]{startTime} - $Backups->[$prevFull]{startTime}
+                             < ($fullPeriod - $origFullPeriod / 2) * 24 * 3600
+               )
+            ) {
+            #
+            # Delete the full backup
+            #
+            #print("Deleting backup $i ($prevFull)\n");
+            unshift(@delete, $i);
+        } else {
+            $fullCnt++;
+            while ( $fullKeepIdx < @$fullKeepCnt
+                     && $fullCnt >= $fullKeepCnt->[$fullKeepIdx] ) {
+                $fullKeepIdx++;
+                $fullCnt = 0;
+                $fullPeriod = 2 * $fullPeriod;
+            }
+        }
+    }
+    #
+    # Now actually delete the backups
+    #
+    for ( my $i = @delete - 1 ; $i >= 0 ; $i-- ) {
+        print(LOG $bpc->timeStamp,
+               "removing full backup $Backups->[$delete[$i]]{num}\n");
+        BackupRemove($client, $Backups, $delete[$i]);
+    }
+    return @delete;
+}
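+
+# Illustrative sketch of the schedule above (hypothetical values, not
+# taken from this file): with, say,
+#
+#     $Conf{FullPeriod}  = 6.97;       # roughly weekly fulls
+#     $Conf{FullKeepCnt} = [4, 2, 3];
+#
+# the loop keeps about 4 fulls at ~1 week spacing, then 2 at ~2 week
+# spacing, then 3 at ~4 week spacing, because $fullPeriod doubles each
+# time a keep-count bucket fills.  A full that a later unfilled
+# (incremental) backup depends on is never deleted.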
+
+#
+# Removes any partial backups
+#
+sub BackupPartialRemove
+{
+    my($client, $Backups) = @_;
+
+    for ( my $i = @$Backups - 1 ; $i >= 0 ; $i-- ) {
+        next if ( $Backups->[$i]{type} ne "partial" );
+        BackupRemove($client, $Backups, $i);
+    }
+}
+
+sub BackupSave
+{
+    my @Backups = $bpc->BackupInfoRead($client);
+    my $num  = -1;
+    my $newFilesFH;
+
+    #
+    # Since we got a good backup we should remove any partial dumps
+    # (the new backup might also be a partial, but that's ok).
+    #
+    BackupPartialRemove($client, \@Backups);
+    $needLink = 1 if ( -f "$Dir/NewFileList" );
+
+    #
+    # Number the new backup
+    #
+    for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+        $num = $Backups[$i]{num} if ( $num < $Backups[$i]{num} );
+    }
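+    # $num is now the highest existing backup number (-1 if none); the new
+    # backup gets the next number, so numbers freed by older, expired
+    # backups are not reused.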
+    $num++;
+    $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$num") if ( -d "$Dir/$num" );
+    if ( !rename("$Dir/new", "$Dir/$num") ) {
+        print(LOG $bpc->timeStamp, "Rename $Dir/new -> $Dir/$num failed\n");
+        $stat{xferOK} = 0;
+        return;
+    }
+
+    #
+    # Add the new backup information to the backup file
+    #
+    my $i = @Backups;
+    $Backups[$i]{num}           = $num;
+    $Backups[$i]{type}          = $type;
+    $Backups[$i]{startTime}     = $startTime;
+    $Backups[$i]{endTime}       = $endTime;
+    $Backups[$i]{size}          = $sizeTotal;
+    $Backups[$i]{nFiles}        = $nFilesTotal;
+    $Backups[$i]{xferErrs}      = $stat{xferErrCnt} || 0;
+    $Backups[$i]{xferBadFile}   = $stat{xferBadFileCnt} || 0;
+    $Backups[$i]{xferBadShare}  = $stat{xferBadShareCnt} || 0;
+    $Backups[$i]{nFilesExist}   = $nFilesExist;
+    $Backups[$i]{sizeExist}     = $sizeExist;
+    $Backups[$i]{sizeExistComp} = $sizeExistComp;
+    $Backups[$i]{tarErrs}       = $tarErrs;
+    $Backups[$i]{compress}      = $Conf{CompressLevel};
+    $Backups[$i]{noFill}        = $type eq "incr" ? 1 : 0;
+    $Backups[$i]{level}         = $incrLevel;
+    $Backups[$i]{mangle}        = 1;     # name mangling always on for v1.04+
+    $Backups[$i]{xferMethod}    = $Conf{XferMethod};
+    $Backups[$i]{charset}       = $Conf{ClientCharset};
+    $Backups[$i]{version}       = $bpc->Version();
+    #
+    # Save the main backups file
+    #
+    $bpc->BackupInfoWrite($client, @Backups);
+    #
+    # Save just this backup's info in case the main backups file
+    # gets corrupted
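+    # (the per-backup copy also lets the backups summary be rebuilt
+    # later, e.g. with BackupPC_fixupBackupSummary)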
+    #
+    BackupPC::Storage->backupInfoWrite($Dir, $Backups[$i]{num},
+                                             $Backups[$i]);
+
+    unlink("$Dir/timeStamp.level0") if ( -f "$Dir/timeStamp.level0" );
+    foreach my $ext ( qw(bad bad.z) ) {
+        next if ( !-f "$Dir/XferLOG.$ext" );
+        unlink("$Dir/XferLOG.$ext.old") if ( -f "$Dir/XferLOG.$ext.old" );
+        rename("$Dir/XferLOG.$ext", "$Dir/XferLOG.$ext.old");
+    }
+
+    #
+    # Now remove the bad files, replacing them if possible with links to
+    # earlier backups.
+    #
+    foreach my $f ( $xfer->getBadFiles ) {
+       my $j;
+       my $shareM = $bpc->fileNameEltMangle($f->{share});
+       my $fileM  = $bpc->fileNameMangle($f->{file});
+       unlink("$Dir/$num/$shareM/$fileM");
+       for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
+           my $file;
+           if ( $Backups[$j]{mangle} ) {
+               $file = "$shareM/$fileM";
+           } else {
+               $file = "$f->{share}/$f->{file}";
+           }
+           next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
+
+            my($exists, $digest, $origSize, $outSize, $errs)
+                                = BackupPC::PoolWrite::LinkOrCopy(
+                                      $bpc,
+                                      "$Dir/$Backups[$j]{num}/$file",
+                                      $Backups[$j]{compress},
+                                      "$Dir/$num/$shareM/$fileM",
+                                      $Conf{CompressLevel});
+            if ( !$exists ) {
+                #
+                # the hard link failed, most likely because the target
+                # file has too many links.  We have copied the file
+                # instead, so add this to the new file list.
+                #
+                if ( !defined($newFilesFH) ) {
+                    my $str = "Appending to NewFileList for $shareM/$fileM\n";
+                    $XferLOG->write(\$str);
+                    open($newFilesFH, ">>", "$TopDir/pc/$client/NewFileList")
+                         || die("can't open $TopDir/pc/$client/NewFileList");
+                    binmode($newFilesFH);
+                }
+                if ( -f "$Dir/$num/$shareM/$fileM" ) {
+                    print($newFilesFH "$digest $origSize $shareM/$fileM\n");
+                } else {
+                    my $str = "Unable to link/copy $num/$f->{share}/$f->{file}"
+                            . " to $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+                    $XferLOG->write(\$str);
+                }
+           } else {
+               my $str = "Bad file $num/$f->{share}/$f->{file} replaced"
+                        . " by link to"
+                        . " $Backups[$j]{num}/$f->{share}/$f->{file}\n";
+               $XferLOG->write(\$str);
+           }
            last;
        }
+       if ( $j < 0 ) {
+           my $str = "Removed bad file $num/$f->{share}/$f->{file}"
+                    . " (no older copy to link to)\n";
+           $XferLOG->write(\$str);
+       }
     }
-    $bpc->BackupInfoWrite($client, @Backups);
+    close($newFilesFH) if ( defined($newFilesFH) );
+    $XferLOG->close();
+    rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$num$fileExt");
+    rename("$Dir/NewFileList", "$Dir/NewFileList.$num");
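+    # NewFileList.$num is later processed by BackupPC_link, which links
+    # the copied files into the pool.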
+
+    return $num;
+}
+
+#
+# Removes a specific backup
+#
+sub BackupRemove
+{
+    my($client, $Backups, $idx) = @_;
+    my($Dir) = "$TopDir/pc/$client";
+
+    if ( $Backups->[$idx]{num} eq "" ) {
+        print("BackupRemove: ignoring empty backup number for idx $idx\n");
+        return;
+    }
+
+    $bpc->RmTreeDefer("$TopDir/trash",
+                      "$Dir/$Backups->[$idx]{num}");
+    unlink("$Dir/SmbLOG.$Backups->[$idx]{num}")
+                if ( -f "$Dir/SmbLOG.$Backups->[$idx]{num}" );
+    unlink("$Dir/SmbLOG.$Backups->[$idx]{num}.z")
+                if ( -f "$Dir/SmbLOG.$Backups->[$idx]{num}.z" );
+    unlink("$Dir/XferLOG.$Backups->[$idx]{num}")
+                if ( -f "$Dir/XferLOG.$Backups->[$idx]{num}" );
+    unlink("$Dir/XferLOG.$Backups->[$idx]{num}.z")
+                if ( -f "$Dir/XferLOG.$Backups->[$idx]{num}.z" );
+    splice(@{$Backups}, $idx, 1);
 }
 
 sub CorrectHostCheck
 {
     my($hostIP, $host) = @_;
-    return if ( $hostIP eq $host && !$Conf{FixedIPNetBiosNameCheck} );
+    return if ( $hostIP eq $host && !$Conf{FixedIPNetBiosNameCheck}
+               || $Conf{NmbLookupCmd} eq "" );
     my($netBiosHost, $netBiosUser) = $bpc->NetBiosInfoGet($hostIP);
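+    # NetBIOS host names are limited to 15 characters, so compare
+    # case-insensitively against the truncated configured host name.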
     return "host $host has mismatching netbios name $netBiosHost"
-            if ( $netBiosHost ne $host );
+               if ( lc($netBiosHost) ne lc(substr($host, 0, 15)) );
     return;
 }
 
+#
+# The Xfer method might tell us from time to time about processes
+# it forks.  We tell BackupPC about this (for status displays) and
+# keep track of the pids in case we cancel the backup
+#
+sub pidHandler
+{
+    @xferPid = @_;
+    @xferPid = grep(/./, @xferPid);
+    return if ( !@xferPid && $tarPid < 0 );
+    my @pids = @xferPid;
+    push(@pids, $tarPid) if ( $tarPid > 0 );
+    my $str = join(",", @pids);
+    $XferLOG->write(\"Xfer PIDs are now $str\n") if ( defined($XferLOG) );
+    print("xferPids $str\n");
+}
+
+#
+# The Xfer method might tell us from time to time about progress
+# in the backup or restore
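+# (a new "completionPercent" value is only printed when the rounded
+# percentage changes, to avoid flooding the parent with messages)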
+#
+sub completionPercent
+{
+    my($percent) = @_;
+
+    $percent = 100 if ( $percent > 100 );
+    $percent =   0 if ( $percent <   0 );
+    if ( !defined($completionPercent)
+        || int($completionPercent + 0.5) != int($percent) ) {
+            printf("completionPercent %.0f\n", $percent);
+    }
+    $completionPercent = $percent;
+}
+
 #
 # Run an optional pre- or post-dump command
 #
 sub UserCommandRun
 {
-    my($type) = @_;
+    my($cmdType, $sharename) = @_;
 
-    return if ( !defined($Conf{$type}) );
+    return if ( !defined($Conf{$cmdType}) );
     my $vars = {
-        xfer    => $xfer,
-        client  => $client,
-        host    => $host,
-        hostIP  => $hostIP,
-        share   => $ShareNames->[0],
-        shares  => $ShareNames,
+        xfer       => $xfer,
+        client     => $client,
+        host       => $host,
+        hostIP     => $hostIP,
+       user       => $Hosts->{$client}{user},
+       moreUsers  => $Hosts->{$client}{moreUsers},
+        share      => $ShareNames->[0],
+        shares     => $ShareNames,
         XferMethod => $Conf{XferMethod},
-        LOG     => *LOG,
-        XferLOG => $XferLOG,
-        stat    => \%stat,
-        xferOK  => $stat{xferOK},
-       type    => $type,
+        sshPath    => $Conf{SshPath},
+        LOG        => *LOG,
+        XferLOG    => $XferLOG,
+        stat       => \%stat,
+        xferOK     => $stat{xferOK} || 0,
+       hostError  => $stat{hostError},
+       type       => $type,
+       cmdType    => $cmdType,
     };
-    my $cmd = $bpc->cmdVarSubstitute($Conf{$type}, $vars);
-    $XferLOG->write(\"Executing $type: @$cmd\n");
+
+    if ($cmdType eq 'DumpPreShareCmd' || $cmdType eq 'DumpPostShareCmd') {
+       $vars->{share} = $sharename;
+    }
+
+    my $cmd = $bpc->cmdVarSubstitute($Conf{$cmdType}, $vars);
+    $XferLOG->write(\"Executing $cmdType: @$cmd\n");
     #
     # Run the user's command, dumping the stdout/stderr into the
     # Xfer log file.  Also supply the optional $vars and %Conf in
@@ -930,6 +1683,7 @@ sub UserCommandRun
     $bpc->cmdSystemOrEval($cmd,
            sub {
                $XferLOG->write(\$_[0]);
+                print(LOG $bpc->timeStamp, "Output from $cmdType: ", $_[0]);
            },
            $vars, \%Conf);
 }
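+
+# Illustrative config.pl sketch (the script path is hypothetical): a
+# pre-dump command that cmdVarSubstitute() would expand might look like
+#
+#     $Conf{DumpPreUserCmd} =
+#         '$sshPath -q -x -l root $host /usr/local/bin/pre-dump.sh $client $type';
+#
+# UserCommandRun() substitutes $sshPath, $host, $client and $type from
+# $vars, runs the command, and copies its output into the XferLOG and,
+# with the change above, into the main LOG as well.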