# Version __VERSION__, __RELEASEDATE__
#------------------------------------------------------------------------
+* Add config file editing.
+
+* Added Brazilian Portuguese pt_br.pm from Reginaldo Ferreira.
+
+* Added Slackware init.d script from Tony Nelson.
+
+* Fixed error reporting when restore/archive fail to write the
+ request file to the client directory.
+
+* Applied patch from Marc Prewitt for DumpPreShareCmd and DumpPostShareCmd.
+
+* Apply patch from Pete Wenzel to add smbClientPath => $Conf{SmbClientPath}
+ to DumpPreUserCmd etc.
+
+* Applied Lorenzo Cappelletti's it.pm patch.
+
+* Applied patch to bin/BackupPC_sendEmail from Marc Prewitt that
+ ignores any file starting with "." in the pc directory when
+ it is generating warnings about old/unused files/directories.
+
+* Applied patch from Marc Prewitt to fix host queue order.
+
+* Add NT_STATUS_FILE_LOCK_CONFLICT to pst read error check in
+ BackupPC_sendEmail to fix bug reported by Dale Renton.
+
+* Fixed minor typo in documentation from Richard Ames.
+
+#------------------------------------------------------------------------
+# Version 2.1.0pl1, 15 Aug 2004
+#------------------------------------------------------------------------
+
+* Added fix to nl.pm from Lieven Bridts.
+
+* Added patch from Tony Nelson to remove $Info{pid} before BackupPC
+ writes the status and shuts down.
+
+* Changed BackupPC_nightly so that it doesn't call find() if the
+ directory doesn't exist. This avoids errors in certain versions
+ of perl. Reported by Bernd Rilling.
+
+* Fixed BackupPC::CGI::Lib to correctly re-load config.pl for mod_perl.
+ Reported by Tony Nelson and Jimmy Liang.
+
+* Explicitly untaint $In{host} in BackupPC::CGI::Lib to fix problem
+ reported by Thomas Tempé.
+
+* Added newline to "...skipping because of user requested delay..."
+ log message in BackupPC_dump. Reported by Wayne Scott.
+
+* Added read file size error checking to BackupPC_tarCreate.
+ Reported by Brandon Evans.
+
+* Added check in BackupPC::Xfer::RsyncFileIO to ensure that when
+ compression is toggled on/off, a compressed backup doesn't link
+ to an uncompressed pool file (and an uncompressed backup doesn't
+ link to a compressed pool file). Reported by Brandon Evans.
+
+* Updated documentation with new dirvish URL and a typo from
+ Todd Curry.
+
+* Fix to BackupPC_sendEmail so that it correctly sends admin emails
+ for hosts that have failed backups. Reported by Simon Kuhn.
+
+#------------------------------------------------------------------------
+# Version 2.1.0, 20 Jun 2004
+#------------------------------------------------------------------------
+
* Added Dutch translation from Lieven Bridts, with tweaks from
Guus Houtzager.
sub Main_TryToRun_Bg_or_User_Queue
{
my($req, $host);
+ my(@deferUserQueue, @deferBgQueue);
+ my $du;
+
+ if ( time - $Info{DUlastValueTime} >= 600 ) {
+ #
+ # Update our notion of disk usage no more than
+ # once every 10 minutes
+ #
+ $du = $bpc->CheckFileSystemUsage($TopDir);
+ $Info{DUlastValue} = $du;
+ $Info{DUlastValueTime} = time;
+ } else {
+ #
+ # if we recently checked it then just use the old value
+ #
+ $du = $Info{DUlastValue};
+ }
+ if ( $Info{DUDailyMaxReset} ) {
+ $Info{DUDailyMaxStartTime} = time;
+ $Info{DUDailyMaxReset} = 0;
+ $Info{DUDailyMax} = 0;
+ }
+ if ( $du > $Info{DUDailyMax} ) {
+ $Info{DUDailyMax} = $du;
+ $Info{DUDailyMaxTime} = time;
+ }
+ if ( $du > $Conf{DfMaxUsagePct} ) {
+ my @bgQueue = @BgQueue;
+ my $nSkip = 0;
+
+ #
+ # When the disk is too full, only run backups that will
+ # do expires, not regular backups
+ #
+ @BgQueue = ();
+ foreach $req ( @bgQueue ) {
+ if ( $req->{dumpExpire} ) {
+ unshift(@BgQueue, $req);
+ } else {
+ $BgQueueOn{$req->{host}} = 0;
+ $nSkip++;
+ }
+ }
+ if ( $nSkip ) {
+ print(LOG $bpc->timeStamp,
+ "Disk too full ($du%); skipped $nSkip hosts\n");
+ $Info{DUDailySkipHostCnt} += $nSkip;
+ }
+ }
+
while ( $RunNightlyWhenIdle == 0 ) {
local(*FH);
- my(@args, @deferUserQueue, @deferBgQueue, $progName, $type);
+ my(@args, $progName, $type);
my $nJobs = keys(%Jobs);
#
# CmdJob and trashClean don't count towards MaxBackups / MaxUserBackups
#
- $nJobs -= $BackupPCNightlyJobs if ( $CmdJob ne "" );
+ if ( $CmdJob ne "" ) {
+ if ( $BackupPCNightlyJobs ) {
+ $nJobs -= $BackupPCNightlyJobs;
+ } else {
+ $nJobs--;
+ }
+ }
$nJobs-- if ( defined($Jobs{$bpc->trashJob} ) );
if ( $nJobs < $Conf{MaxBackups} + $Conf{MaxUserBackups}
&& @UserQueue > 0 ) {
&& (@CmdQueue + $nJobs)
<= $Conf{MaxBackups} + $Conf{MaxPendingCmds}
&& @BgQueue > 0 ) {
- my $du;
- if ( time - $Info{DUlastValueTime} >= 60 ) {
- #
- # Update our notion of disk usage no more than
- # once every minute
- #
- $du = $bpc->CheckFileSystemUsage($TopDir);
- $Info{DUlastValue} = $du;
- $Info{DUlastValueTime} = time;
- } else {
- #
- # if we recently checked it then just use the old value
- #
- $du = $Info{DUlastValue};
- }
- if ( $Info{DUDailyMaxReset} ) {
- $Info{DUDailyMaxStartTime} = time;
- $Info{DUDailyMaxReset} = 0;
- $Info{DUDailyMax} = 0;
- }
- if ( $du > $Info{DUDailyMax} ) {
- $Info{DUDailyMax} = $du;
- $Info{DUDailyMaxTime} = time;
- }
- if ( $du > $Conf{DfMaxUsagePct} ) {
- my $nSkip = @BgQueue + @deferBgQueue;
- print(LOG $bpc->timeStamp,
- "Disk too full ($du%); skipping $nSkip hosts\n");
- $Info{DUDailySkipHostCnt} += $nSkip;
- @BgQueue = ();
- @deferBgQueue = ();
- %BgQueueOn = ();
- next;
- }
$req = pop(@BgQueue);
if ( defined($Jobs{$req->{host}}) ) {
- push(@deferBgQueue, $req);
+ #
+ # Job is currently running for this host; save it for later
+ #
+ unshift(@deferBgQueue, $req);
next;
}
$BgQueueOn{$req->{host}} = 0;
} else {
- while ( @deferBgQueue ) {
- push(@BgQueue, pop(@deferBgQueue));
- }
- while ( @deferUserQueue ) {
- push(@UserQueue, pop(@deferUserQueue));
- }
+ #
+ # Restore the deferred jobs
+ #
+ @BgQueue = (@BgQueue, @deferBgQueue);
+ @UserQueue = (@UserQueue, @deferUserQueue);
last;
}
$host = $req->{host};
delete($Status{$host}{error});
delete($Status{$host}{errorTime});
$Status{$host}{endTime} = time;
+ $Status{$host}{lastGoodBackupTime} = time;
} elsif ( $mesg =~ /^backups disabled/ ) {
print(LOG $bpc->timeStamp,
"Ignoring old backup error on $host\n");
$Info{pool}{$f[0]}[$chunk]{FileCntRename} += $f[9];
$Info{pool}{$f[0]}[$chunk]{FileLinkMax} = $f[10]
if ( $Info{pool}{$f[0]}[$chunk]{FileLinkMax} < $f[10] );
+ $Info{pool}{$f[0]}[$chunk]{FileLinkTotal} += $f[11];
$Info{pool}{$f[0]}[$chunk]{Time} = time;
} elsif ( $mesg =~ /^BackupPC_nightly lock_off/ ) {
$BackupPCNightlyLock--;
if ( $BackupPCNightlyLock == 0 ) {
#
# This means the last BackupPC_nightly is done with
- # the pool clean, so it's to start running regular
+ # the pool clean, so it's ok to start running regular
# backups again.
#
$RunNightlyWhenIdle = 0;
"User $user requested backup of unknown host"
. " $host\n");
$reply = "error: unknown host $host";
- } elsif ( defined($Jobs{$host})
- && $Jobs{$host}{type} ne "restore" ) {
- print(LOG $bpc->timeStamp,
- "User $user requested backup of $host,"
- . " but one is currently running\n");
- $reply = "error: backup of $host is already running";
} else {
print(LOG $bpc->timeStamp,
"User $user requested backup of $host"
#
sub HostSortCompare
{
+ #
+ # Hosts with errors go before hosts without errors
+ #
return -1 if ( $Status{$a}{error} ne "" && $Status{$b}{error} eq "" );
+
+ #
+ # Hosts with no errors go after hosts with errors
+ #
+
return 1 if ( $Status{$a}{error} eq "" && $Status{$b}{error} ne "" );
- return $Status{$a}{endTime} <=> $Status{$b}{endTime};
+
+ #
+ # hosts with the older last good backups sort earlier
+ #
+ my $r = $Status{$a}{lastGoodBackupTime} <=> $Status{$b}{lastGoodBackupTime};
+ return $r if ( $r );
+
+ #
+ # Finally, just sort based on host name
+ #
+ return $a cmp $b;
}
#
#
sub QueueAllPCs
{
+ my $nSkip = 0;
foreach my $host ( sort(HostSortCompare keys(%$Hosts)) ) {
delete($Status{$host}{backoffTime})
if ( defined($Status{$host}{backoffTime})
#
# this is a fixed ip host: queue it
#
- unshift(@BgQueue,
- {host => $host, user => "BackupPC", reqTime => time,
- dhcp => $Hosts->{$host}{dhcp}});
+ if ( $Info{DUlastValue} > $Conf{DfMaxUsagePct} ) {
+ #
+ # Since we are out of disk space, instead of queuing
+ # a regular job, queue an expire check instead. That
+ # way if the admin reduces the number of backups to
+ # keep then we will actually delete them. Otherwise
+ # BackupPC_dump will never run since we have exceeded
+ # the limit.
+ #
+ $nSkip++;
+ unshift(@BgQueue,
+ {host => $host, user => "BackupPC", reqTime => time,
+ dhcp => $Hosts->{$host}{dhcp}, dumpExpire => 1});
+ } else {
+ #
+ # Queue regular background backup
+ #
+ unshift(@BgQueue,
+ {host => $host, user => "BackupPC", reqTime => time,
+ dhcp => $Hosts->{$host}{dhcp}});
+ }
$BgQueueOn{$host} = 1;
}
}
+ if ( $nSkip ) {
+ print(LOG $bpc->timeStamp,
+ "Disk too full ($Info{DUlastValue}%); skipped $nSkip hosts\n");
+ $Info{DUDailySkipHostCnt} += $nSkip;
+ }
foreach my $dhcp ( @{$Conf{DHCPAddressRanges}} ) {
for ( my $i = $dhcp->{first} ; $i <= $dhcp->{last} ; $i++ ) {
my $ipAddr = "$dhcp->{ipAddrBase}.$i";
}
%Jobs = ();
}
+ delete($Info{pid});
StatusWrite();
unlink("$TopDir/log/BackupPC.pid");
exit(1);
}
if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) {
- printf(LOG "%sskipping because of user requested delay (%.1f hours left)",
+ printf(LOG "%sskipping because of user requested delay (%.1f hours left)\n",
$bpc->timeStamp, ($StatusHost{backoffTime} - time) / 3600);
NothingToDo($needLink);
}
next;
}
+ UserCommandRun("DumpPreShareCmd", $shareName);
+
if ( $Conf{XferMethod} eq "tar" ) {
#
# Use tar (eg: tar/ssh) as the transport program.
my $errStr = BackupPC::Xfer::Rsync::errStr;
print(LOG $bpc->timeStamp, "dump failed: $errStr\n");
print("dump failed: $errStr\n");
+ UserCommandRun("DumpPostShareCmd", $shareName) if ( $NeedPostCmd );
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
exit(1);
}
sleep(1);
kill($bpc->sigName2num("KILL"), @xferPid);
}
+ UserCommandRun("DumpPostShareCmd", $shareName) if ( $NeedPostCmd );
UserCommandRun("DumpPostUserCmd") if ( $NeedPostCmd );
exit(1);
}
next;
}
}
+
+ UserCommandRun("DumpPostShareCmd", $shareName) if ( $NeedPostCmd );
+
$stat{xferOK} = 0 if ( $stat{hostError} || $stat{hostAbort} );
if ( !$stat{xferOK} ) {
#
#
sub UserCommandRun
{
- my($cmdType) = @_;
+ my($cmdType, $sharename) = @_;
return if ( !defined($Conf{$cmdType}) );
my $vars = {
type => $type,
cmdType => $cmdType,
};
+
+ if ($cmdType eq 'DumpPreShareCmd' || $cmdType eq 'DumpPostShareCmd') {
+ $vars->{share} = $sharename;
+ }
+
my $cmd = $bpc->cmdVarSubstitute($Conf{$cmdType}, $vars);
$XferLOG->write(\"Executing $cmdType: @$cmd\n");
#
my $fileRepMax; # worse case number of files that have repeated checksums
# (ie: max(nnn+1) for all names xxxxxxxxxxxxxxxx_nnn)
my $fileLinkMax; # maximum number of hardlinks on a pool file
+my $fileLinkTotal; # total number of hardlinks on entire pool
my $fileCntRename; # number of renamed files (to keep file numbering
# contiguous)
my %FixList; # list of paths that need to be renamed to avoid
$fileLinkMax = 0;
$fileCntRename = 0;
%FixList = ();
- find({wanted => \&GetPoolStats}, "$TopDir/$pool/$dir");
+ find({wanted => \&GetPoolStats}, "$TopDir/$pool/$dir")
+ if ( -d "$TopDir/$pool/$dir" );
my $kb = $blkCnt / 2;
my $kbRm = $blkCntRm / 2;
my $kb2 = $blkCnt2 / 2;
}
print("BackupPC_stats $i = $pool,$fileCnt,$dirCnt,$kb,$kb2,$kbRm,"
. "$fileCntRm,$fileCntRep,$fileRepMax,"
- . "$fileCntRename,$fileLinkMax\n");
+ . "$fileCntRename,$fileLinkMax,$fileLinkTotal\n");
}
}
$blkCnt += $nblocks;
$blkCnt2 += $nblocks if ( $nlinks == 2 );
$fileLinkMax = $nlinks if ( $fileLinkMax < $nlinks );
+ $fileLinkTotal += $nlinks - 1;
}
}
my @badHosts = ();
foreach my $host ( sort(keys(%Status)) ) {
- next if ( $Status{$host}{reason} ne "backup failed"
+ next if ( ($Status{$host}{reason} ne "Reason_backup_failed"
+ && $Status{$host}{reason} ne "Reason_restore_failed")
|| $Status{$host}{error} =~ /^lost network connection to host/ );
push(@badHosts, "$host ($Status{$host}{error})");
}
my @files = $d->read;
$d->close;
foreach my $host ( @files ) {
- next if ( $host eq "." || $host eq ".." || defined($Status{$host}) );
+ next if ( $host =~ /^\./ || defined($Status{$host}) );
push(@oldDirs, "$TopDir/pc/$host");
}
if ( @oldDirs ) {
while ( 1 ) {
my $s = $fh->readLine();
last if ( $s eq "" );
- if ( $s =~ /^\s*Error reading file.*\.pst : ERRDOS - ERRlock/
+ if ( $s =~ /^\s*Error reading file.*\.pst : (ERRDOS - ERRlock|NT_STATUS_FILE_LOCK_CONFLICT)/
|| $s =~ /^\s*Error reading file.*\.pst\. Got 0 bytes/ ) {
$badOutlook = 1;
last;
my $fh = @_;
foreach my $hdr ( @HardLinks ) {
$hdr->{size} = 0;
+ my $name = $hdr->{linkname};
+ $name =~ s{^\./}{/};
+ if ( defined($HardLinkExtraFiles{$name}) ) {
+ $hdr->{linkname} = $HardLinkExtraFiles{$name};
+ }
if ( defined($PathRemove)
&& substr($hdr->{linkname}, 0, length($PathRemove)+1)
eq ".$PathRemove" ) {
TarWriteFileInfo($fh, $hdr);
my($data, $size);
while ( $f->read(\$data, $BufSize) > 0 ) {
+ if ( $size + length($data) > $hdr->{size} ) {
+ print(STDERR "Error: truncating $hdr->{fullPath} to"
+ . " $hdr->{size} bytes\n");
+ $data = substr($data, 0, $hdr->{size} - $size);
+ $ErrorCnt++;
+ }
TarWrite($fh, \$data);
$size += length($data);
}
$f->close;
+ if ( $size != $hdr->{size} ) {
+ print(STDERR "Error: padding $hdr->{fullPath} to $hdr->{size}"
+ . " bytes from $size bytes\n");
+ $ErrorCnt++;
+ while ( $size < $hdr->{size} ) {
+ my $len = $hdr->{size} - $size;
+ $len = $BufSize if ( $len > $BufSize );
+ $data = "\0" x $len;
+ TarWrite($fh, \$data);
+ $size += $len;
+ }
+ }
TarWritePad($fh, $size);
$FileCnt++;
$ByteCnt += $size;
my $done = 0;
my $name = $hdr->{linkname};
$name =~ s{^\./}{/};
- if ( $HardLinkExtraFiles{$name} ) {
+ if ( defined($HardLinkExtraFiles{$name}) ) {
$done = 1;
} else {
foreach my $arg ( @ARGV ) {
# routine, so that we save the hassle of dealing with
# mangling, merging and attributes.
#
- $HardLinkExtraFiles{$hdr->{linkname}} = 1;
+ my $name = $hdr->{linkname};
+ $name =~ s{^\./}{/};
+ $HardLinkExtraFiles{$name} = $hdr->{name};
archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
}
} elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
#
my($nRead);
#print("Reading $f->{name}, $f->{size} bytes, type $f->{type}\n");
+ pathCreate($dir, "$OutDir/$ShareName/$f->{mangleName}", $f);
my $poolWrite = BackupPC::PoolWrite->new($bpc,
"$OutDir/$ShareName/$f->{mangleName}",
$f->{size}, $Compress);
# a plain file.
#
$f->{size} = length($f->{linkname});
+ pathCreate($dir, "$OutDir/$ShareName/$f->{mangleName}", $f);
my $poolWrite = BackupPC::PoolWrite->new($bpc,
"$OutDir/$ShareName/$f->{mangleName}",
$f->{size}, $Compress);
# contents.
#
$f->{size} = length($f->{linkname});
+ pathCreate($dir, "$OutDir/$ShareName/$f->{mangleName}", $f);
my $poolWrite = BackupPC::PoolWrite->new($bpc,
"$OutDir/$ShareName/$f->{mangleName}",
$f->{size}, $Compress);
} else {
$data = "$f->{devmajor},$f->{devminor}";
}
+ pathCreate($dir, "$OutDir/$ShareName/$f->{mangleName}", $f);
my $poolWrite = BackupPC::PoolWrite->new($bpc,
"$OutDir/$ShareName/$f->{mangleName}",
length($data), $Compress);
$name);
}
+#
+# Create the parent directory of $file if necessary
+#
+sub pathCreate
+{
+ my($dir, $file, $f) = @_;
+
+ #
+ # Get parent directory of each of $dir and $file
+ #
+ $dir =~ s{/[^/]*$}{};
+ $file =~ s{/[^/]*$}{};
+ return if ( -d $file );
+ mkpath($file, 0, 0777);
+ $Attrib{$dir}->set($file, {
+ type => BPC_FTYPE_DIR,
+ mode => 0755,
+ uid => $f->{uid},
+ gid => $f->{gid},
+ size => 0,
+ mtime => 0,
+ });
+}
+
sub catch_signal
{
my $sigName = shift;
#
#========================================================================
#
-# Version 2.1.0beta2, released 23 May 2004.
+# Version 2.1.0, released 20 Jun 2004.
#
# See http://backuppc.sourceforge.net.
#
"startServer" => "StartServer",
"Stop" => "StopServer",
"adminOpts" => "AdminOptions",
+ "editConfig" => "EditConfig",
+ #"editHosts" => "EditHosts",
);
#
#
$Conf{HardLinkMax} = 31999;
-###########################################################################
-# What to backup and when to do it
-# (can be overridden in the per-PC config.pl)
-###########################################################################
-#
-# Name of the host share that is backed up when using SMB. This can be a
-# string or an array of strings if there are multiple shares per host.
-# Examples:
-#
-# $Conf{SmbShareName} = 'c'; # backup 'c' share
-# $Conf{SmbShareName} = ['c', 'd']; # backup 'c' and 'd' shares
#
-# This setting only matters if $Conf{XferMethod} = 'smb'.
+# Advanced option for asking BackupPC to load additional perl modules.
+# Can be a list (array ref) of module names to load at startup.
#
-$Conf{SmbShareName} = 'C$';
+$Conf{PerlModuleLoad} = undef;
#
-# Smbclient share user name. This is passed to smbclient's -U argument.
-#
-# This setting only matters if $Conf{XferMethod} = 'smb'.
+# Path to init.d script and command to use that script to start the
+# server from the CGI interface. The following variables are substituted
+# at run-time:
#
-$Conf{SmbShareUserName} = '';
-
+# $sshPath path to ssh ($Conf{SshPath})
+# $serverHost same as $Conf{ServerHost}
+# $serverInitdPath path to init.d script ($Conf{ServerInitdPath})
#
-# Smbclient share password. This is passed to smbclient via its PASSWD
-# environment variable. There are several ways you can tell BackupPC
-# the smb share password. In each case you should be very careful about
-# security. If you put the password here, make sure that this file is
-# not readable by regular users! See the "Setting up config.pl" section
-# in the documentation for more information.
+# Example:
#
-# This setting only matters if $Conf{XferMethod} = 'smb'.
+# $Conf{ServerInitdPath} = '/etc/init.d/backuppc';
+# $Conf{ServerInitdStartCmd} = '$sshPath -q -x -l root $serverHost'
+# . ' $serverInitdPath start'
+# . ' < /dev/null >& /dev/null';
#
-$Conf{SmbSharePasswd} = '';
+$Conf{ServerInitdPath} = '';
+$Conf{ServerInitdStartCmd} = '';
-#
-# Which host directories to backup when using tar transport. This can be a
-# string or an array of strings if there are multiple directories to
-# backup per host. Examples:
-#
-# $Conf{TarShareName} = '/'; # backup everything
-# $Conf{TarShareName} = '/home'; # only backup /home
-# $Conf{TarShareName} = ['/home', '/src']; # backup /home and /src
-#
-# The fact this parameter is called 'TarShareName' is for historical
-# consistency with the Smb transport options. You can use any valid
-# directory on the client: there is no need for it to correspond to
-# any Smb share or device mount point.
-#
-# Note also that you can also use $Conf{BackupFilesOnly} to specify
-# a specific list of directories to backup. It's more efficient to
-# use this option instead of $Conf{TarShareName} since a new tar is
-# run for each entry in $Conf{TarShareName}.
-#
-# On the other hand, if you add --one-file-system to $Conf{TarClientCmd}
-# you can backup each file system separately, which makes restoring one
-# bad file system easier. In this case you would list all of the mount
-# points here, since you can't get the same result with
-# $Conf{BackupFilesOnly}:
-#
-# $Conf{TarShareName} = ['/', '/var', '/data', '/boot'];
-#
-# This setting only matters if $Conf{XferMethod} = 'tar'.
-#
-$Conf{TarShareName} = '/';
+###########################################################################
+# What to backup and when to do it
+# (can be overridden in the per-PC config.pl)
+###########################################################################
#
# Minimum period in days between full backups. A full dump will only be
# done if at least this much time has elapsed since the last full dump,
# we keep at least $Conf{FullKeepCntMin} full backups no matter how old
# they are.
#
-# Note that $Conf{FullAgeMax} will be increased to $Conf{FullAgeMax}
-# times $Conf{FullPeriod} if $Conf{FullAgeMax} specifies enough
+# Note that $Conf{FullAgeMax} will be increased to $Conf{FullKeepCnt}
+# times $Conf{FullPeriod} if $Conf{FullKeepCnt} specifies enough
# full backups to exceed $Conf{FullAgeMax}.
#
$Conf{FullKeepCntMin} = 1;
$Conf{BackupZeroFilesIsFatal} = 1;
###########################################################################
-# General per-PC configuration settings
+# How to backup a client
# (can be overridden in the per-PC config.pl)
###########################################################################
#
#
$Conf{XferLogLevel} = 1;
+#
+# Name of the host share that is backed up when using SMB. This can be a
+# string or an array of strings if there are multiple shares per host.
+# Examples:
+#
+# $Conf{SmbShareName} = 'c'; # backup 'c' share
+# $Conf{SmbShareName} = ['c', 'd']; # backup 'c' and 'd' shares
+#
+# This setting only matters if $Conf{XferMethod} = 'smb'.
+#
+$Conf{SmbShareName} = 'C$';
+
+#
+# Smbclient share user name. This is passed to smbclient's -U argument.
+#
+# This setting only matters if $Conf{XferMethod} = 'smb'.
+#
+$Conf{SmbShareUserName} = '';
+
+#
+# Smbclient share password. This is passed to smbclient via its PASSWD
+# environment variable. There are several ways you can tell BackupPC
+# the smb share password. In each case you should be very careful about
+# security. If you put the password here, make sure that this file is
+# not readable by regular users! See the "Setting up config.pl" section
+# in the documentation for more information.
+#
+# This setting only matters if $Conf{XferMethod} = 'smb'.
+#
+$Conf{SmbSharePasswd} = '';
+
#
# Full path for smbclient. Security caution: normal users should not
# allowed to write to this file or directory.
. ' $I_option -U $userName -E -N -d 1'
. ' -c tarmode\\ full -Tx -';
+#
+# Which host directories to backup when using tar transport. This can be a
+# string or an array of strings if there are multiple directories to
+# backup per host. Examples:
+#
+# $Conf{TarShareName} = '/'; # backup everything
+# $Conf{TarShareName} = '/home'; # only backup /home
+# $Conf{TarShareName} = ['/home', '/src']; # backup /home and /src
+#
+# The fact this parameter is called 'TarShareName' is for historical
+# consistency with the Smb transport options. You can use any valid
+# directory on the client: there is no need for it to correspond to
+# any Smb share or device mount point.
+#
+# Note also that you can also use $Conf{BackupFilesOnly} to specify
+# a specific list of directories to backup. It's more efficient to
+# use this option instead of $Conf{TarShareName} since a new tar is
+# run for each entry in $Conf{TarShareName}.
+#
+# On the other hand, if you add --one-file-system to $Conf{TarClientCmd}
+# you can backup each file system separately, which makes restoring one
+# bad file system easier. In this case you would list all of the mount
+# points here, since you can't get the same result with
+# $Conf{BackupFilesOnly}:
+#
+# $Conf{TarShareName} = ['/', '/var', '/data', '/boot'];
+#
+# This setting only matters if $Conf{XferMethod} = 'tar'.
+#
+$Conf{TarShareName} = '/';
+
#
# Full command to run tar on the client. GNU tar is required. You will
# need to fill in the correct paths for ssh2 on the local host (server)
$Conf{PingCmd} = '$pingPath -c 1 $host';
#
-# Path to init.d script and command to use that script to start the
-# server from the CGI interface. The following variables are substituted
-# at run-time:
-#
-# $sshPath path to ssh ($Conf{SshPath})
-# $serverHost same as $Conf{ServerHost}
-# $serverInitdPath path to init.d script ($Conf{ServerInitdPath})
-#
-# Example:
-#
-# $Conf{ServerInitdPath} = '/etc/init.d/backuppc';
-# $Conf{ServerInitdStartCmd} = '$sshPath -q -x -l root $serverHost'
-# . ' $serverInitdPath start'
-# . ' < /dev/null >& /dev/null';
+# Maximum round-trip ping time in milliseconds. This threshold is set
+# to avoid backing up PCs that are remotely connected through WAN or
+# dialup connections. The output from ping -s (assuming it is supported
+# on your system) is used to check the round-trip packet time. On your
+# local LAN round-trip times should be much less than 20msec. On most
+# WAN or dialup connections the round-trip time will be typically more
+# than 20msec. Tune if necessary.
#
-$Conf{ServerInitdPath} = '';
-$Conf{ServerInitdStartCmd} = '';
+$Conf{PingMaxMsec} = 20;
#
# Compression level to use on files. 0 means no compression. Compression
#
$Conf{CompressLevel} = 0;
-#
-# Maximum round-trip ping time in milliseconds. This threshold is set
-# to avoid backing up PCs that are remotely connected through WAN or
-# dialup connections. The output from ping -s (assuming it is supported
-# on your system) is used to check the round-trip packet time. On your
-# local LAN round-trip times should be much less than 20msec. On most
-# WAN or dialup connections the round-trip time will be typically more
-# than 20msec. Tune if necessary.
-#
-$Conf{PingMaxMsec} = 20;
-
#
# Timeout in seconds when listening for the transport program's
# (smbclient, tar etc) stdout. If no output is received during this
$Conf{MaxOldPerPCLogFiles} = 12;
#
-# Optional commands to run before and after dumps and restores.
+# Optional commands to run before and after dumps and restores,
+# and also before and after each share of a dump.
+#
# Stdout from these commands will be written to the Xfer (or Restore)
# log file. One example of using these commands would be to
-# shut down and restart a database server, or to dump a database
-# to files for backup. Example:
+# shut down and restart a database server, dump a database
+# to files for backup, or doing a snapshot of a share prior
+# to a backup. Example:
#
# $Conf{DumpPreUserCmd} = '$sshPath -q -x -l root $host /usr/bin/dumpMysql';
#
# The following variable substitutions are made at run time for
-# $Conf{DumpPreUserCmd} and $Conf{DumpPostUserCmd}:
+# $Conf{DumpPreUserCmd}, $Conf{DumpPostUserCmd}, $Conf{DumpPreShareCmd}
+# and $Conf{DumpPostShareCmd}:
#
# $type type of dump (incr or full)
# $xferOK 1 if the dump succeeded, 0 if it didn't
# $hostIP IP address of host
# $user user name from the hosts file
# $moreUsers list of additional users from the hosts file
-# $share the first share name
+# $share the first share name (or current share for
+# $Conf{DumpPreShareCmd} and $Conf{DumpPostShareCmd})
# $shares list of all the share names
# $XferMethod value of $Conf{XferMethod} (eg: tar, rsync, smb)
# $sshPath value of $Conf{SshPath},
#
$Conf{DumpPreUserCmd} = undef;
$Conf{DumpPostUserCmd} = undef;
+$Conf{DumpPreShareCmd} = undef;
+$Conf{DumpPostShareCmd} = undef;
$Conf{RestorePreUserCmd} = undef;
$Conf{RestorePostUserCmd} = undef;
$Conf{ArchivePreUserCmd} = undef;
#
$Conf{ClientNameAlias} = undef;
-#
-# Advanced option for asking BackupPC to load additional perl modules.
-# Can be a list (array ref) of module names to load at startup.
-#
-$Conf{PerlModuleLoad} = undef;
-
###########################################################################
# Email reminders, status and messages
# (can be overridden in the per-PC config.pl)
#
# Language to use. See lib/BackupPC/Lang for the list of supported
# languages, which include English (en), French (fr), Spanish (es),
-# German (de), Italian (it) and Dutch (nl).
+# German (de), Italian (it), Dutch (nl) and Brazilian Portuguese
+# (pt_br).
#
# Currently the Language setting applies to the CGI interface and email
# messages sent to users. Log files and other text are still in English.
# $Conf{CgiImageDirURL} URL.
#
$Conf{CgiCSSFile} = 'BackupPC_stnd.css';
+
+#
+# Which per-host config variables a non-admin user is allowed
+# to edit.
+#
+$Conf{CgiUserConfigEdit} = {
+ FullPeriod => 1,
+ IncrPeriod => 1,
+ FullKeepCnt => 1,
+ FullKeepCntMin => 1,
+ FullAgeMax => 1,
+ IncrKeepCnt => 1,
+ IncrKeepCntMin => 1,
+ IncrAgeMax => 1,
+ PartialAgeMax => 1,
+ IncrFill => 1,
+ RestoreInfoKeepCnt => 1,
+ ArchiveInfoKeepCnt => 1,
+ BackupFilesOnly => 1,
+ BackupFilesExclude => 1,
+ BlackoutBadPingLimit => 1,
+ BlackoutGoodCnt => 1,
+ BlackoutPeriods => 1,
+ BackupZeroFilesIsFatal => 1,
+ XferMethod => 1,
+ XferLogLevel => 1,
+ SmbShareName => 1,
+ SmbShareUserName => 1,
+ SmbSharePasswd => 1,
+ TarShareName => 1,
+ TarFullArgs => 1,
+ TarIncrArgs => 1,
+ RsyncShareName => 1,
+ RsyncdClientPort => 1,
+ RsyncdPasswd => 1,
+ RsyncdAuthRequired => 1,
+ RsyncCsumCacheVerifyProb => 1,
+ RsyncArgs => 1,
+ RsyncRestoreArgs => 1,
+ ArchiveDest => 1,
+ ArchiveComp => 1,
+ ArchivePar => 1,
+ ArchiveSplit => 1,
+ FixedIPNetBiosNameCheck => 1,
+ PingMaxMsec => 1,
+ ClientTimeout => 1,
+ MaxOldPerPCLogFiles => 1,
+ CompressLevel => 1,
+ ClientNameAlias => 1,
+ EMailNotifyMinDays => 1,
+ EMailFromUserName => 1,
+ EMailAdminUserName => 1,
+ EMailUserDestDomain => 1,
+ EMailNoBackupEverSubj => 1,
+ EMailNoBackupEverMesg => 1,
+ EMailNotifyOldBackupDays => 1,
+ EMailNoBackupRecentSubj => 1,
+ EMailNoBackupRecentMesg => 1,
+ EMailNotifyOldOutlookDays => 1,
+ EMailOutlookBackupSubj => 1,
+ EMailOutlookBackupMesg => 1,
+};
#
# perl configure.pl
#
+# To read about the command-line options for this configure script:
+#
+# perldoc configure.pl
+#
# The installation steps are described as the script runs.
#
# AUTHOR
#
foreach my $dir ( qw(bin doc
lib/BackupPC/CGI
+ lib/BackupPC/Config
lib/BackupPC/Lang
+ lib/BackupPC/Storage
lib/BackupPC/Xfer
lib/BackupPC/Zip
) ) {
printf("Installing library in $DestDir$Conf{InstallDir}/lib\n");
foreach my $lib ( qw(
BackupPC/Lib.pm
- BackupPC/FileZIO.pm
BackupPC/Attrib.pm
+ BackupPC/FileZIO.pm
+ BackupPC/Config.pm
BackupPC/PoolWrite.pm
+ BackupPC/Storage.pm
BackupPC/View.pm
- BackupPC/Xfer/Archive.pm
- BackupPC/Xfer/Tar.pm
- BackupPC/Xfer/Smb.pm
- BackupPC/Xfer/Rsync.pm
- BackupPC/Xfer/RsyncDigest.pm
- BackupPC/Xfer/RsyncFileIO.pm
- BackupPC/Zip/FileMember.pm
- BackupPC/Lang/en.pm
- BackupPC/Lang/fr.pm
- BackupPC/Lang/es.pm
- BackupPC/Lang/de.pm
- BackupPC/Lang/it.pm
- BackupPC/Lang/nl.pm
BackupPC/CGI/AdminOptions.pm
BackupPC/CGI/Archive.pm
BackupPC/CGI/ArchiveInfo.pm
BackupPC/CGI/StopServer.pm
BackupPC/CGI/Summary.pm
BackupPC/CGI/View.pm
+ BackupPC/Config/Meta.pm
+ BackupPC/Lang/en.pm
+ BackupPC/Lang/fr.pm
+ BackupPC/Lang/es.pm
+ BackupPC/Lang/de.pm
+ BackupPC/Lang/it.pm
+ BackupPC/Lang/nl.pm
+ BackupPC/Lang/pt_br.pm
+ BackupPC/Storage/Text.pm
+ BackupPC/Xfer/Archive.pm
+ BackupPC/Xfer/Tar.pm
+ BackupPC/Xfer/Smb.pm
+ BackupPC/Xfer/Rsync.pm
+ BackupPC/Xfer/RsyncDigest.pm
+ BackupPC/Xfer/RsyncFileIO.pm
+ BackupPC/Zip/FileMember.pm
) ) {
InstallFile("lib/$lib", "$DestDir$Conf{InstallDir}/lib/$lib", 0444);
}
printf("Making init.d scripts\n");
foreach my $init ( qw(gentoo-backuppc gentoo-backuppc.conf linux-backuppc
- solaris-backuppc debian-backuppc suse-backuppc) ) {
+ solaris-backuppc debian-backuppc suse-backuppc
+ slackware-backuppc ) ) {
InstallFile("init.d/src/$init", "init.d/$init", 0444);
}
Various programs and scripts use rsync to provide hardlinked backups.
See, for example, Mike Rubel's site (L<http://www.mikerubel.org/computers/rsync_snapshots>),
-JW Schultz's dirvish (L<http://www.pegasys.ws/dirvish> (although as of
-June 2004 this link doesn't work)),
+JW Schultz's dirvish (L<http://www.dirvish.org/>),
Ben Escoto's rdiff-backup (L<http://rdiff-backup.stanford.edu>),
and John Bowman's rlbackup (L<http://www.math.ualberta.ca/imaging/rlbackup>).
=head2 Step 3: Setting up config.pl
After running configure.pl, browse through the config file,
-__INSTALLDIR__/conf/config.pl, and make sure all the default settings
+__TOPDIR__/conf/config.pl, and make sure all the default settings
are correct. In particular, you will need to decide whether to use
smb, tar or rsync transport (or whether to set it on a per-PC basis)
and set the relevant parameters for that transport method.
Note that for direct restore to work, the $Conf{XferMethod} must
be able to write to the client. For example, that means an SMB
share for smbclient needs to be writable, and the rsyncd module
-needs "read only" set to "yes". This creates additional security
+needs "read only" set to "false". This creates additional security
risks. If you only create read-only SMB shares (which is a good
idea), then the direct restore will fail. You can disable the
direct restore option by setting $Conf{SmbClientRestoreCmd},
Lieven Bridts provided the Dutch translation, nl.pm, for v2.1.0,
with some tweaks from Guus Houtzager.
+Reginaldo Ferreira provided the Brazilian Portuguese translation
+pt_br.pm for v2.2.0.
+
Many people have reported bugs, made useful suggestions and helped
with testing; see the ChangeLog and the mail lists.
rc-update add backuppc default
+Slackware:
+=========
+
+When configure.pl is run, the script slackware-backuppc is created.
+
+Install it by running these commands as root:
+
+ cp slackware-backuppc /etc/rc.d/rc.backuppc
+ chmod 755 /etc/rc.d/rc.backuppc
+
+Then use an editor to add /etc/rc.d/rc.backuppc to /etc/rc.d/rc.local.
+
Solaris:
=======
[ \%ArchiveReq],
[qw(*ArchiveReq)]);
$archive->Indent(1);
- if ( open(REQ, ">$TopDir/pc/$archivehost/$reqFileName") ) {
+ my $openPath = "$TopDir/pc/$archivehost/$reqFileName";
+ if ( open(REQ, ">", $openPath) ) {
binmode(REQ);
print(REQ $archive->Dump);
close(REQ);
} else {
- ErrorExit($Lang->{Can_t_open_create} );
+ ErrorExit(eval("qq{$Lang->{Can_t_open_create__openPath}}"));
}
$reply = $bpc->ServerMesg("archive $User $archivehost $reqFileName");
$str = eval("qq{$Lang->{Archive_requested}}");
$Lang = $bpc->Lang();
$ConfigMTime = $bpc->ConfigMTime();
} elsif ( $bpc->ConfigMTime() != $ConfigMTime ) {
- $bpc->ServerMesg("log Re-read config file because mtime changed");
- $bpc->ServerMesg("server reload");
+ $bpc->ConfigRead();
+ %Conf = $bpc->Conf();
+ $Lang = $bpc->Lang();
+ $ConfigMTime = $bpc->ConfigMTime();
}
#
{map {$_, 1} split(",", $Hosts->{$host}{moreUsers}) }
}
}
+
+ #
+ # Untaint the host name
+ #
+ if ( $In{host} =~ /^([\w.\s-]+)$/ ) {
+ $In{host} = $1;
+ } else {
+ delete($In{host});
+ }
}
sub timeStamp2
{
my($status) = @_;
ServerConnect();
+ %Status = () if ( $status =~ /\bhosts\b/ );
+ %StatusHost = () if ( $status =~ /\bhost\(/ );
my $reply = $bpc->ServerMesg("status $status");
$reply = $1 if ( $reply =~ /(.*)/s );
eval($reply);
{ link => "", name => $Lang->{Status}},
{ link => "?action=adminOpts", name => $Lang->{Admin_Options},
priv => 1},
+ { link => "?action=editConfig", name => "Edit Config",
+ priv => 1},
+ { link => "?action=editHosts", name => "Edit Hosts",
+ priv => 1},
{ link => "?action=summary", name => $Lang->{PC_Summary}},
{ link => "?action=view&type=LOG", name => $Lang->{LOG_file},
priv => 1},
" class=\"navbar\"");
}
if ( -f "$TopDir/pc/$host/config.pl" ) {
- NavLink("?action=view&type=config&host=${EscURI($host)}",
- $Lang->{Config_file}, " class=\"navbar\"");
+ NavLink("?action=editConfig&host=${EscURI($host)}",
+ "Edit Config", " class=\"navbar\"");
}
print "</div>\n";
}
NavSectionTitle($Lang->{NavSectionTitle_});
foreach my $l ( @adminLinks ) {
if ( $PrivAdmin || !$l->{priv} ) {
- my $txt = defined($l->{lname}) ? $Lang->{$l->{lname}} : $l->{name};
+ my $txt = $l->{lname} ne "" ? $Lang->{$l->{lname}} : $l->{name};
NavLink($l->{link}, $txt);
}
}
$dump->Indent(1);
mkpath("$TopDir/pc/$hostDest", 0, 0777)
if ( !-d "$TopDir/pc/$hostDest" );
- if ( open(REQ, ">$TopDir/pc/$hostDest/$reqFileName") ) {
+ my $openPath = "$TopDir/pc/$hostDest/$reqFileName";
+ if ( open(REQ, ">", $openPath) ) {
binmode(REQ);
print(REQ $dump->Dump);
close(REQ);
} else {
- ErrorExit(eval("qq{$Lang->{Can_t_open_create}}"));
+ ErrorExit(eval("qq{$Lang->{Can_t_open_create__openPath}}"));
}
$reply = $bpc->ServerMesg("restore ${EscURI($ipAddr)}"
. " ${EscURI($hostDest)} $User $reqFileName");
my $view = BackupPC::View->new($bpc, $host, \@Backups);
my $a = $view->fileAttrib($num, $share, $dir);
if ( $dir =~ m{(^|/)\.\.(/|$)} || !defined($a) ) {
- ErrorExit("Can't restore bad file ${EscHTML($dir)}");
+ ErrorExit("Can't restore bad file ${EscHTML($dir)} ($num, $share, $dir)");
}
my $f = BackupPC::FileZIO->open($a->{fullPath}, 0, $a->{compress});
my $data;
}
$f->close;
$linkName =~ s/^\.\///;
- my $share = $1 if ( $dir =~ /^\/?(.*?)\// );
restoreFile($host, $num, $share, $linkName, 1, $dir);
return;
}
$Lang{Host__doesn_t_exist} = "Computer \${EscHTML(\$In{hostDest})} existiert nicht";
$Lang{You_don_t_have_permission_to_restore_onto_host} = "Sie haben keine Berechtigung zum Restore auf Computer"
. " \${EscHTML(\$In{hostDest})}";
-$Lang{Can_t_open_create} = "Kann Datei nicht öffnen oder erstellen "
- . "\${EscHTML(\"\$TopDir/pc/\$hostDest/\$reqFileName\")}";
+$Lang{Can_t_open_create__openPath} = "Kann Datei nicht öffnen oder erstellen "
+ . "\${EscHTML(\"\$openPath\")}";
$Lang{Only_privileged_users_can_restore_backup_files2} = "Nur berechtigte Benutzer dĂĽrfen Backup und Restore von Dateien"
. " fĂĽr Computer \${EscHTML(\$host)} durchfĂĽhren.";
$Lang{Empty_host_name} = "leerer Computer Name";
$Lang{Host__doesn_t_exist} = "Host \${EscHTML(\$In{hostDest})} doesn\'t exist";
$Lang{You_don_t_have_permission_to_restore_onto_host} = "You don\'t have permission to restore onto host"
. " \${EscHTML(\$In{hostDest})}";
-$Lang{Can_t_open_create} = "Can\'t open/create "
- . "\${EscHTML(\"\$TopDir/pc/\$hostDest/\$reqFileName\")}";
+$Lang{Can_t_open_create__openPath} = "Can\'t open/create "
+ . "\${EscHTML(\"\$openPath\")}";
$Lang{Only_privileged_users_can_restore_backup_files2} = "Only privileged users can restore backup files"
. " for host \${EscHTML(\$host)}.";
$Lang{Empty_host_name} = "Empty host name";
$Lang{Host__doesn_t_exist} = "El Host \${EscHTML(\$In{hostDest})} no existe";
$Lang{You_don_t_have_permission_to_restore_onto_host} = "No tiene autorizaciĂłn para restaurar en el host"
. " \${EscHTML(\$In{hostDest})}";
-$Lang{Can_t_open_create} = "No puedo abrir/crear "
- . "\${EscHTML(\"\$TopDir/pc/\$hostDest/\$reqFileName\")}";
+$Lang{Can_t_open_create__openPath} = "No puedo abrir/crear "
+ . "\${EscHTML(\"\$openPath\")}";
$Lang{Only_privileged_users_can_restore_backup_files2} = "SĂłlo los usuarios autorizados pueden restaurar copias de seguridad"
. " del host \${EscHTML(\$host)}.";
$Lang{Empty_host_name} = "Nombre de host vacĂo";
$Lang{Host__doesn_t_exist} = "L'hĂ´te \${EscHTML(\$In{hostDest})} n\'existe pas.";
$Lang{You_don_t_have_permission_to_restore_onto_host} = "Vous n\'avez pas la permission de restaurer sur l\'hĂ´te"
. " \${EscHTML(\$In{hostDest})}";
-$Lang{Can_t_open_create} = "Ne peut pas ouvrir/créer "
- . "\${EscHTML(\"\$TopDir/pc/\$hostDest/\$reqFileName\")}";
+$Lang{Can_t_open_create__openPath} = "Ne peut pas ouvrir/créer "
+ . "\${EscHTML(\"\$openPath\")}";
$Lang{Only_privileged_users_can_restore_backup_files2} = "Seuls les utilisateurs privilégiés peuvent restaurer"
. " des fichiers de sauvegarde pour l\'hĂ´te \${EscHTML(\$host)}.";
$Lang{Empty_host_name} = "Nom d\'hĂ´te vide";
#!/bin/perl
#
-# $Id: it.pm,v 1.9 2004/06/20 02:21:02 cbarratt Exp $
+# $Id: it.pm,v 1.10 2004/10/10 07:31:25 cbarratt Exp $
#
# Italian i18n file
#
$Lang{Host__doesn_t_exist} = "L\'host \${EscHTML(\$In{hostDest})} non esiste";
$Lang{You_don_t_have_permission_to_restore_onto_host} = "Non si possiedono i permessi per ripristinare sull\'host"
. " \${EscHTML(\$In{hostDest})}";
-$Lang{Can_t_open_create} = "Impossibile creare/aprire "
- . "\${EscHTML(\"\$TopDir/pc/\$hostDest/\$reqFileName\")}";
+$Lang{Can_t_open_create__openPath} = "Impossibile creare/aprire "
+ . "\${EscHTML(\"\$openPath\")}";
$Lang{Only_privileged_users_can_restore_backup_files2} = "Solo gli utenti privilegiati possono ripristinare i file"
. " per l\'host \${EscHTML(\$host)}.";
$Lang{Empty_host_name} = "Nome host vuoto";
Ciao $userName,
-e` stato effettuato correttamente il backup del tuo PC ($host) per
+non e` stato effettuato correttamente il backup del tuo PC ($host) per
$days giorni. Dal $firstTime fino a $days fa sono stati eseguiti con
successo $numBackups backup. I backup dei PC dovrebbero avvenire
automaticamente quando il tuo PC e` connesso alla rete.
#my %lang;
#use strict;
-#File: nl.pm version 1.3
+#File: nl.pm version 1.5
# --------------------------------
$Lang{Start_Archive} = "Start Archivering";
en het maximum van gisteren was \$Info{DUDailyMaxPrev}%.
</ul>
</ul>
-eof
+EOF
$Lang{BackupPC_Server_Status} = <<EOF;
\${h1(qq{$Lang{H_BackupPC_Server_Status}})}
$Lang{Restore_Options_for__host_Option1_disabled} = <<EOF;
Rechtstreeks terugplaatsen is gedeactiveerd voor machine \${EscHTML(\$hostDest)}.
Gelieve een van de andere herstelopties te kiezen.
-eof
+EOF
# ------------------------------
$Lang{Option_2__Download_Zip_archive} = <<EOF;
$Lang{Host__doesn_t_exist} = "Machine \${EscHTML(\$In{hostDest})} bestaat niet.";
$Lang{You_don_t_have_permission_to_restore_onto_host} = "U beschikt niet over de juiste rechten om bestanden terug te plaatsen naar machine "
. " \${EscHTML(\$In{hostDest})}";
-$Lang{Can_t_open_create} = "Ik kan "
- . "\${EscHTML(\"\$TopDir/pc/\$hostDest/\$reqFileName\")} niet openen of aanmaken";
+$Lang{Can_t_open_create__openPath} = "Ik kan "
+ . "\${EscHTML(\"\$openPath\")} niet openen of aanmaken";
$Lang{Only_privileged_users_can_restore_backup_files2} = "Alleen gebruikers met bijzondere rechten kunnen bestanden terugplaatsen"
. " naar machine \${EscHTML(\$host)}.";
$Lang{Empty_host_name} = "Lege machinenaam";
use strict;
use vars qw(%Conf %Lang);
+use BackupPC::Storage;
use Fcntl qw/:flock/;
use Carp;
use DirHandle ();
my $class = shift;
my($topDir, $installDir, $noUserCheck) = @_;
- my $bpc = bless {
+ my $paths = {
TopDir => $topDir || '/data/BackupPC',
BinDir => $installDir || '/usr/local/BackupPC',
LibDir => $installDir || '/usr/local/BackupPC',
+ };
+ $paths->{BinDir} .= "/bin";
+ $paths->{LibDir} .= "/lib";
+
+ $paths->{storage} = BackupPC::Storage->new($paths);
+
+ my $bpc = bless {
+ %$paths,
Version => '2.1.0',
- BackupFields => [qw(
- num type startTime endTime
- nFiles size nFilesExist sizeExist nFilesNew sizeNew
- xferErrs xferBadFile xferBadShare tarErrs
- compress sizeExistComp sizeNewComp
- noFill fillFromNum mangle xferMethod level
- )],
- RestoreFields => [qw(
- num startTime endTime result errorMsg nFiles size
- tarCreateErrs xferErrs
- )],
- ArchiveFields => [qw(
- num startTime endTime result errorMsg
- )],
}, $class;
- $bpc->{BinDir} .= "/bin";
- $bpc->{LibDir} .= "/lib";
+
#
# Clean up %ENV and setup other variables.
#
print(STDERR $error, "\n");
return;
}
+
#
# Verify we are running as the correct user
#
sub BackupInfoRead
{
my($bpc, $host) = @_;
- local(*BK_INFO, *LOCK);
- my(@Backups);
-
- flock(LOCK, LOCK_EX) if open(LOCK, "$bpc->{TopDir}/pc/$host/LOCK");
- if ( open(BK_INFO, "$bpc->{TopDir}/pc/$host/backups") ) {
- binmode(BK_INFO);
- while ( <BK_INFO> ) {
- s/[\n\r]+//;
- next if ( !/^(\d+\t(incr|full|partial)[\d\t]*$)/ );
- $_ = $1;
- @{$Backups[@Backups]}{@{$bpc->{BackupFields}}} = split(/\t/);
- }
- close(BK_INFO);
- }
- close(LOCK);
- return @Backups;
+
+ return $bpc->{storage}->BackupInfoRead($host);
}
sub BackupInfoWrite
{
my($bpc, $host, @Backups) = @_;
- local(*BK_INFO, *LOCK);
- my($i);
-
- flock(LOCK, LOCK_EX) if open(LOCK, "$bpc->{TopDir}/pc/$host/LOCK");
- if ( -s "$bpc->{TopDir}/pc/$host/backups" ) {
- unlink("$bpc->{TopDir}/pc/$host/backups.old")
- if ( -f "$bpc->{TopDir}/pc/$host/backups.old" );
- rename("$bpc->{TopDir}/pc/$host/backups",
- "$bpc->{TopDir}/pc/$host/backups.old")
- if ( -f "$bpc->{TopDir}/pc/$host/backups" );
- }
- if ( open(BK_INFO, ">$bpc->{TopDir}/pc/$host/backups") ) {
- binmode(BK_INFO);
- for ( $i = 0 ; $i < @Backups ; $i++ ) {
- my %b = %{$Backups[$i]};
- printf(BK_INFO "%s\n", join("\t", @b{@{$bpc->{BackupFields}}}));
- }
- close(BK_INFO);
- }
- close(LOCK);
+
+ return $bpc->{storage}->BackupInfoWrite($host, @Backups);
}
sub RestoreInfoRead
{
my($bpc, $host) = @_;
- local(*RESTORE_INFO, *LOCK);
- my(@Restores);
-
- flock(LOCK, LOCK_EX) if open(LOCK, "$bpc->{TopDir}/pc/$host/LOCK");
- if ( open(RESTORE_INFO, "$bpc->{TopDir}/pc/$host/restores") ) {
- binmode(RESTORE_INFO);
- while ( <RESTORE_INFO> ) {
- s/[\n\r]+//;
- next if ( !/^(\d+.*)/ );
- $_ = $1;
- @{$Restores[@Restores]}{@{$bpc->{RestoreFields}}} = split(/\t/);
- }
- close(RESTORE_INFO);
- }
- close(LOCK);
- return @Restores;
+
+ return $bpc->{storage}->RestoreInfoRead($host);
}
sub RestoreInfoWrite
{
my($bpc, $host, @Restores) = @_;
- local(*RESTORE_INFO, *LOCK);
- my($i);
-
- flock(LOCK, LOCK_EX) if open(LOCK, "$bpc->{TopDir}/pc/$host/LOCK");
- if ( -s "$bpc->{TopDir}/pc/$host/restores" ) {
- unlink("$bpc->{TopDir}/pc/$host/restores.old")
- if ( -f "$bpc->{TopDir}/pc/$host/restores.old" );
- rename("$bpc->{TopDir}/pc/$host/restores",
- "$bpc->{TopDir}/pc/$host/restores.old")
- if ( -f "$bpc->{TopDir}/pc/$host/restores" );
- }
- if ( open(RESTORE_INFO, ">$bpc->{TopDir}/pc/$host/restores") ) {
- binmode(RESTORE_INFO);
- for ( $i = 0 ; $i < @Restores ; $i++ ) {
- my %b = %{$Restores[$i]};
- printf(RESTORE_INFO "%s\n",
- join("\t", @b{@{$bpc->{RestoreFields}}}));
- }
- close(RESTORE_INFO);
- }
- close(LOCK);
+
+ return $bpc->{storage}->RestoreInfoWrite($host, @Restores);
}
sub ArchiveInfoRead
{
my($bpc, $host) = @_;
- local(*ARCHIVE_INFO, *LOCK);
- my(@Archives);
-
- flock(LOCK, LOCK_EX) if open(LOCK, "$bpc->{TopDir}/pc/$host/LOCK");
- if ( open(ARCHIVE_INFO, "$bpc->{TopDir}/pc/$host/archives") ) {
- binmode(ARCHIVE_INFO);
- while ( <ARCHIVE_INFO> ) {
- s/[\n\r]+//;
- next if ( !/^(\d+.*)/ );
- $_ = $1;
- @{$Archives[@Archives]}{@{$bpc->{ArchiveFields}}} = split(/\t/);
- }
- close(ARCHIVE_INFO);
- }
- close(LOCK);
- return @Archives;
+
+ return $bpc->{storage}->ArchiveInfoRead($host);
}
sub ArchiveInfoWrite
{
my($bpc, $host, @Archives) = @_;
- local(*ARCHIVE_INFO, *LOCK);
- my($i);
-
- flock(LOCK, LOCK_EX) if open(LOCK, "$bpc->{TopDir}/pc/$host/LOCK");
- if ( -s "$bpc->{TopDir}/pc/$host/archives" ) {
- unlink("$bpc->{TopDir}/pc/$host/archives.old")
- if ( -f "$bpc->{TopDir}/pc/$host/archives.old" );
- rename("$bpc->{TopDir}/pc/$host/archives",
- "$bpc->{TopDir}/pc/$host/archives.old")
- if ( -f "$bpc->{TopDir}/pc/$host/archives" );
- }
- if ( open(ARCHIVE_INFO, ">$bpc->{TopDir}/pc/$host/archives") ) {
- binmode(ARCHIVE_INFO);
- for ( $i = 0 ; $i < @Archives ; $i++ ) {
- my %b = %{$Archives[$i]};
- printf(ARCHIVE_INFO "%s\n",
- join("\t", @b{@{$bpc->{ArchiveFields}}}));
- }
- close(ARCHIVE_INFO);
- }
- close(LOCK);
+
+ return $bpc->{storage}->ArchiveInfoWrite($host, @Archives);
+}
+
+sub ConfigDataRead
+{
+ my($bpc, $host) = @_;
+
+ return $bpc->{storage}->ConfigDataRead($host);
+}
+
+sub ConfigDataWrite
+{
+ my($bpc, $host, $conf) = @_;
+
+ return $bpc->{storage}->ConfigDataWrite($host, $conf);
}
sub ConfigRead
{
my($bpc, $host) = @_;
- my($ret, $mesg, $config, @configs);
-
- $bpc->{Conf} = ();
- push(@configs, "$bpc->{TopDir}/conf/config.pl");
- push(@configs, "$bpc->{TopDir}/conf/$host.pl")
- if ( $host ne "config" && -f "$bpc->{TopDir}/conf/$host.pl" );
- push(@configs, "$bpc->{TopDir}/pc/$host/config.pl")
- if ( defined($host) && -f "$bpc->{TopDir}/pc/$host/config.pl" );
- foreach $config ( @configs ) {
- %Conf = ();
- if ( !defined($ret = do $config) && ($! || $@) ) {
- $mesg = "Couldn't open $config: $!" if ( $! );
- $mesg = "Couldn't execute $config: $@" if ( $@ );
- $mesg =~ s/[\n\r]+//;
- return $mesg;
- }
- %{$bpc->{Conf}} = ( %{$bpc->{Conf} || {}}, %Conf );
+ my($ret);
+
+ #
+ # Read main config file
+ #
+ my($mesg, $config) = $bpc->{storage}->ConfigDataRead();
+ return $mesg if ( defined($mesg) );
+
+ $bpc->{Conf} = $config;
+
+ #
+ # Read host config file
+ #
+ if ( $host ne "" ) {
+ ($mesg, $config) = $bpc->{storage}->ConfigDataRead($host);
+ return $mesg if ( defined($mesg) );
+ $bpc->{Conf} = { %{$bpc->{Conf}}, %$config };
}
- return if ( !defined($bpc->{Conf}{Language}) );
+
+ #
+ # Load optional perl modules
+ #
if ( defined($bpc->{Conf}{PerlModuleLoad}) ) {
#
# Load any user-specified perl modules. This is for
eval("use $module;");
}
}
+
+ #
+ # Load language file
+ #
+ return "No language setting" if ( !defined($bpc->{Conf}{Language}) );
my $langFile = "$bpc->{LibDir}/BackupPC/Lang/$bpc->{Conf}{Language}.pm";
if ( !defined($ret = do $langFile) && ($! || $@) ) {
$mesg = "Couldn't open language file $langFile: $!" if ( $! );
sub ConfigMTime
{
my($bpc) = @_;
- return (stat("$bpc->{TopDir}/conf/config.pl"))[9];
+
+ return $bpc->{storage}->ConfigMTime();
}
#
sub HostInfoRead
{
my($bpc, $host) = @_;
- my(%hosts, @hdr, @fld);
- local(*HOST_INFO);
- if ( !open(HOST_INFO, "$bpc->{TopDir}/conf/hosts") ) {
- print(STDERR $bpc->timeStamp,
- "Can't open $bpc->{TopDir}/conf/hosts\n");
- return {};
- }
- binmode(HOST_INFO);
- while ( <HOST_INFO> ) {
- s/[\n\r]+//;
- s/#.*//;
- s/\s+$//;
- next if ( /^\s*$/ || !/^([\w\.\\-]+\s+.*)/ );
- #
- # Split on white space, except if preceded by \
- # using zero-width negative look-behind assertion
- # (always wanted to use one of those).
- #
- @fld = split(/(?<!\\)\s+/, $1);
- #
- # Remove any \
- #
- foreach ( @fld ) {
- s{\\(\s)}{$1}g;
- }
- if ( @hdr ) {
- if ( defined($host) ) {
- next if ( lc($fld[0]) ne $host );
- @{$hosts{lc($fld[0])}}{@hdr} = @fld;
- close(HOST_INFO);
- return \%hosts;
- } else {
- @{$hosts{lc($fld[0])}}{@hdr} = @fld;
- }
- } else {
- @hdr = @fld;
- }
- }
- close(HOST_INFO);
- return \%hosts;
+ return $bpc->{storage}->HostInfoRead($host);
}
#
sub HostsMTime
{
my($bpc) = @_;
- return (stat("$bpc->{TopDir}/conf/hosts"))[9];
+
+ return $bpc->{storage}->HostsMTime();
}
#
#
# Note: also update configure.pl when this version number is changed!
#
- if ( $File::RsyncP::VERSION < 0.51 ) {
+ if ( $File::RsyncP::VERSION < 0.52 ) {
$RsyncLibOK = 0;
- $RsyncLibErr = "File::RsyncP module version too old: need 0.50";
+ $RsyncLibErr = "File::RsyncP module version"
+ . " ($File::RsyncP::VERSION) too old: need 0.52";
} else {
$RsyncLibOK = 1;
}
$rsyncClientCmd = $conf->{RsyncClientCmd};
$argList = ['--server', '--sender', @$rsyncArgs,
'.', $t->{shareNameSlash}];
+ $argList = File::RsyncP->excludeStrip($argList);
$fioArgs = {
client => $t->{client},
share => $t->{shareName},
use BackupPC::Xfer::RsyncDigest qw(:all);
use BackupPC::PoolWrite;
-use constant S_IFMT => 0170000; # type of file
-use constant S_IFDIR => 0040000; # directory
-use constant S_IFCHR => 0020000; # character special
-use constant S_IFBLK => 0060000; # block special
-use constant S_IFREG => 0100000; # regular
-use constant S_IFLNK => 0120000; # symbolic link
-use constant S_IFSOCK => 0140000; # socket
-use constant S_IFIFO => 0010000; # fifo
+use constant S_HLINK_TARGET => 0400000; # this file is hardlink target
+use constant S_IFMT => 0170000; # type of file
+use constant S_IFDIR => 0040000; # directory
+use constant S_IFCHR => 0020000; # character special
+use constant S_IFBLK => 0060000; # block special
+use constant S_IFREG => 0100000; # regular
+use constant S_IFLNK => 0120000; # symbolic link
+use constant S_IFSOCK => 0140000; # socket
+use constant S_IFIFO => 0010000; # fifo
use vars qw( $RsyncLibOK );
my $fio = bless {
blockSize => 700,
logLevel => 0,
- digest => File::RsyncP::Digest->new,
+ digest => File::RsyncP::Digest->new($options->{protocol_version}),
checksumSeed => 0,
attrib => {},
logHandler => \&logHandler,
return $fio;
}
+#
+# We publish our version to File::RsyncP. This is so File::RsyncP
+# can provide backward compatibility to older FileIO code.
+#
+# Versions:
+#
+# undef or 1: protocol version 26, no hardlinks
+# 2: protocol version 28, supports hardlinks
+#
+sub version
+{
+ return 2;
+}
+
sub blockSize
{
my($fio, $value) = @_;
return $fio->{blockSize};
}
+sub protocol_version
+{
+ my($fio, $value) = @_;
+
+ if ( defined($value) ) {
+ $fio->{protocol_version} = $value;
+ $fio->{digest}->protocol($fio->{protocol_version});
+ }
+ return $fio->{protocol_version};
+}
+
+sub preserve_hard_links
+{
+ my($fio, $value) = @_;
+
+ $fio->{preserve_hard_links} = $value if ( defined($value) );
+ return $fio->{preserve_hard_links};
+}
+
sub logHandlerSet
{
my($fio, $sub) = @_;
my($fio, $f, $needMD4, $defBlkSize, $phase) = @_;
$defBlkSize ||= $fio->{blockSize};
- my $attr = $fio->attribGet($f);
+ my $attr = $fio->attribGet($f, 1);
$fio->{file} = $f;
$fio->csumEnd if ( defined($fio->{csum}) );
return -1 if ( $attr->{type} != BPC_FTYPE_FILE );
+
#
# Rsync uses short checksums on the first phase. If the whole-file
# checksum fails, then the file is repeated with full checksums.
{
my($fio, $f) = @_;
- my $attr = $fio->attribGet($f);
+ my $attr = $fio->attribGet($f, 1);
$fio->{file} = $f;
$fio->readEnd if ( defined($fio->{fh}) );
if ( !defined($fio->{fh} = BackupPC::FileZIO->open($attr->{fullPath},
sub attribGet
{
- my($fio, $f) = @_;
+ my($fio, $f, $doHardLink) = @_;
my($attr) = $fio->attribGetWhere($f);
+ if ( $doHardLink && $attr->{type} == BPC_FTYPE_HARDLINK ) {
+ $fio->log("$attr->{fullPath}: opening for hardlink read"
+ . " (name = $f->{name})") if ( $fio->{logLevel} >= 4 );
+ my $fh = BackupPC::FileZIO->open($attr->{fullPath}, 0,
+ $attr->{compress});
+ my $target;
+ if ( defined($fh) ) {
+ $fh->read(\$target, 65536);
+ $fh->close;
+ $target =~ s/^\.?\/+//;
+ } else {
+ $fio->log("$attr->{fullPath}: can't open for hardlink read");
+ $fio->{stats}{errorCnt}++;
+ $attr->{type} = BPC_FTYPE_FILE;
+ return $attr;
+ }
+ $target = "/$target" if ( $target !~ /^\// );
+ $fio->log("$attr->{fullPath}: redirecting to $target (will trim "
+ . "$fio->{xfer}{pathHdrSrc})") if ( $fio->{logLevel} >= 4 );
+ $target =~ s/^\Q$fio->{xfer}{pathHdrSrc}//;
+ $f->{name} = $target;
+ $attr = $fio->attribGet($f);
+ $fio->log(" ... now got $attr->{fullPath}")
+ if ( $fio->{logLevel} >= 4 );
+ }
return $attr;
}
sub mode2type
{
- my($fio, $mode) = @_;
+ my($fio, $f) = @_;
+ my $mode = $f->{mode};
if ( ($mode & S_IFMT) == S_IFREG ) {
- return BPC_FTYPE_FILE;
+ if ( defined($f->{hlink}) && !$f->{hlink_self} ) {
+ return BPC_FTYPE_HARDLINK;
+ } else {
+ return BPC_FTYPE_FILE;
+ }
} elsif ( ($mode & S_IFMT) == S_IFDIR ) {
return BPC_FTYPE_DIR;
} elsif ( ($mode & S_IFMT) == S_IFLNK ) {
}
$fio->log("attribSet(dir=$dir, file=$file)") if ( $fio->{logLevel} >= 4 );
+ my $mode = $f->{mode};
+
+ $mode |= S_HLINK_TARGET if ( $f->{hlink_self} );
$fio->{attrib}{$dir}->set($file, {
- type => $fio->mode2type($f->{mode}),
- mode => $f->{mode},
+ type => $fio->mode2type($f),
+ mode => $mode,
uid => $f->{uid},
gid => $f->{gid},
size => $placeHolder ? -1 : $f->{size},
my $path = $fio->{outDirSh} . $fNameM;
my $attr = $fio->attribGet($f);
my $str = "";
- my $type = $fio->mode2type($f->{mode});
+ my $type = $fio->mode2type($f);
$fio->log("makeSpecial($path, $type, $f->{mode})")
if ( $fio->{logLevel} >= 5 );
if ( $type == BPC_FTYPE_CHARDEV || $type == BPC_FTYPE_BLOCKDEV ) {
my($major, $minor, $fh, $fileData);
- $major = $f->{rdev} >> 8;
- $minor = $f->{rdev} & 0xff;
+ if ( defined($f->{rdev_major}) ) {
+ $major = $f->{rdev_major};
+ $minor = $f->{rdev_minor};
+ } else {
+ $major = $f->{rdev} >> 8;
+ $minor = $f->{rdev} & 0xff;
+ }
$str = "$major,$minor";
} elsif ( ($f->{mode} & S_IFMT) == S_IFLNK ) {
$str = $f->{link};
+ } elsif ( ($f->{mode} & S_IFMT) == S_IFREG ) {
+ #
+ # this is a hardlink
+ #
+ if ( !defined($f->{hlink}) ) {
+ $fio->log("Error: makeSpecial($path, $type, $f->{mode}) called"
+ . " on a regular non-hardlink file");
+ return 1;
+ }
+ $str = $f->{hlink};
}
#
# Now see if the file is different, or this is a full, in which
my($fh, $fileData);
if ( $fio->{full}
|| !defined($attr)
- || $attr->{type} != $fio->mode2type($f->{mode})
+ || $attr->{type} != $type
|| $attr->{mtime} != $f->{mtime}
|| $attr->{size} != $f->{size}
|| $attr->{uid} != $f->{uid}
$fh->close if ( defined($fh) );
}
+#
+# Make a hardlink. Returns non-zero on error.
+# This actually gets called twice for each hardlink.
+# Once as the file list is processed, and again at
+# the end. BackupPC does them as it goes (since it is
+# just saving the hardlink info and not actually making
+# hardlinks).
+#
+sub makeHardLink
+{
+ my($fio, $f, $end) = @_;
+
+ return if ( $end );
+ return $fio->makeSpecial($f) if ( !$f->{hlink_self} );
+}
+
sub unlink
{
my($fio, $path) = @_;
my $owner = "$f->{uid}/$f->{gid}";
my $type = (("", "p", "c", "", "d", "", "b", "", "", "", "l", "", "s"))
[($f->{mode} & S_IFMT) >> 12];
+ my $link;
+
+ if ( ($f->{mode} & S_IFMT) == S_IFLNK ) {
+ $link = " -> $f->{link}";
+ } if ( ($f->{mode} & S_IFMT) == S_IFREG
+ && defined($f->{hlink}) && !$f->{hlink_self} ) {
+ $link = " -> $f->{hlink}";
+ }
- $fio->log(sprintf(" %-6s %1s%4o %9s %11.0f %s",
+ $fio->log(sprintf(" %-6s %1s%4o %9s %11.0f %s%s",
$action,
$type,
$f->{mode} & 07777,
$owner,
$f->{size},
- $f->{name}));
+ $f->{name},
+ $link));
}
#
. " ($fio->{rxFile}{size} vs $rxSize)")
if ( $fio->{logLevel} >= 5 );
}
+ #
+ # If compression was off and now on, or on and now off, then
+ # don't do an exact match.
+ #
+ if ( defined($fio->{rxLocalAttr})
+ && !$fio->{rxLocalAttr}{compress} != !$fio->{xfer}{compress} ) {
+ $fio->{rxMatchBlk} = undef; # compression changed, so no file match
+ $fio->log("$fio->{rxFile}{name}: compression changed, so no match"
+ . " ($fio->{rxLocalAttr}{compress} vs $fio->{xfer}{compress})")
+ if ( $fio->{logLevel} >= 4 );
+ }
delete($fio->{rxInFd});
delete($fio->{rxOutFd});
delete($fio->{rxDigest});
if ( $fio->{logLevel} >= 9 );
$fio->{rxOutFile} = $rxOutFile;
$fio->{rxOutFileRel} = $rxOutFileRel;
- $fio->{rxDigest} = File::RsyncP::Digest->new;
+ $fio->{rxDigest} = File::RsyncP::Digest->new($fio->{protocol_version});
$fio->{rxDigest}->add(pack("V", $fio->{checksumSeed}));
}
if ( defined($fio->{rxMatchBlk})
#
# Empty file; just create an empty file digest
#
- $fio->{rxDigest} = File::RsyncP::Digest->new;
+ $fio->{rxDigest} = File::RsyncP::Digest->new($fio->{protocol_version});
$fio->{rxDigest}->add(pack("V", $fio->{checksumSeed}));
$newDigest = $fio->{rxDigest}->digest;
}
my($a, $fio, $fList, $outputFunc) = @_;
my $name = $a->{relPath};
my $n = $name;
- my $type = $fio->mode2type($a->{mode});
+ my $type = $a->{type};
my $extraAttribs = {};
$n =~ s/^\Q$fio->{xfer}{pathHdrSrc}//;
- $fio->log("Sending $name (remote=$n)") if ( $fio->{logLevel} >= 4 );
+ $fio->log("Sending $name (remote=$n) type = $type") if ( $fio->{logLevel} >= 1 );
if ( $type == BPC_FTYPE_CHARDEV
|| $type == BPC_FTYPE_BLOCKDEV
|| $type == BPC_FTYPE_SYMLINK ) {
# Note: char/block devices have $a->{size} = 0, so we
# can't do an error check on $rdSize.
#
- $extraAttribs = { rdev => $1 * 256 + $2 };
+ $extraAttribs = {
+ rdev => $1 * 256 + $2,
+ rdev_major => $1,
+ rdev_minor => $2,
+ };
} else {
$fio->log("$name: unexpected special file contents $str");
$fio->{stats}{errorCnt}++;
$fio->log("$name: can't open");
$fio->{stats}{errorCnt}++;
}
+ } elsif ( $fio->{preserve_hard_links}
+ && ($type == BPC_FTYPE_HARDLINK || $type == BPC_FTYPE_FILE)
+ && ($type == BPC_FTYPE_HARDLINK
+ || $fio->{protocol_version} < 27
+ || $a->{mode} & S_HLINK_TARGET ) ) {
+ #
+ # Fill in fake inode information so that the remote rsync
+ # can correctly create hardlinks.
+ #
+ $name =~ s/^\.?\/+//;
+ my($target, $inode);
+
+ if ( $type == BPC_FTYPE_HARDLINK ) {
+ my $fh = BackupPC::FileZIO->open($a->{fullPath}, 0,
+ $a->{compress});
+ if ( defined($fh) ) {
+ $fh->read(\$target, 65536);
+ $fh->close;
+ $target =~ s/^\.?\/+//;
+ if ( defined($fio->{hlinkFile2Num}{$target}) ) {
+ $inode = $fio->{hlinkFile2Num}{$target};
+ } else {
+ $inode = $fio->{fileListCnt};
+ $fio->{hlinkFile2Num}{$target} = $inode;
+ }
+ } else {
+ $fio->log("$a->{fullPath}: can't open for hardlink");
+ $fio->{stats}{errorCnt}++;
+ }
+ } elsif ( $a->{mode} & S_HLINK_TARGET ) {
+ if ( defined($fio->{hlinkFile2Num}{$name}) ) {
+ $inode = $fio->{hlinkFile2Num}{$name};
+ } else {
+ $inode = $fio->{fileListCnt};
+ $fio->{hlinkFile2Num}{$name} = $inode;
+ }
+ }
+ $inode = $fio->{fileListCnt} if ( !defined($inode) );
+ $fio->log("$name: setting inode to $inode");
+ $extraAttribs = {
+ %$extraAttribs,
+ dev => 0,
+ inode => $inode,
+ };
}
my $f = {
- name => $n,
- #dev => 0, # later, when we support hardlinks
- #inode => 0, # later, when we support hardlinks
- mode => $a->{mode},
- uid => $a->{uid},
- gid => $a->{gid},
- mtime => $a->{mtime},
- size => $a->{size},
- %$extraAttribs,
+ name => $n,
+ mode => $a->{mode} & ~S_HLINK_TARGET,
+ uid => $a->{uid},
+ gid => $a->{gid},
+ mtime => $a->{mtime},
+ size => $a->{size},
+ %$extraAttribs,
};
$fList->encode($f);
$f->{name} = "$fio->{xfer}{pathHdrDest}/$f->{name}";
#
# Cumulate stats
#
+ $fio->{fileListCnt}++;
if ( $type != BPC_FTYPE_DIR ) {
$fio->{stats}{TotalFileCnt}++;
$fio->{stats}{TotalFileSize} += $a->{size};
#
$fio->log("fileListSend: sending file list: "
. join(" ", @{$fio->{fileList}})) if ( $fio->{logLevel} >= 4 );
+ $fio->{fileListCnt} = 0;
+ $fio->{hlinkFile2Num} = {};
foreach my $name ( @{$fio->{fileList}} ) {
$fio->{view}->find($fio->{xfer}{bkupSrcNum},
$fio->{xfer}{bkupSrcShare},
umask(0022);
-my $Version = "2.1.0";
-my $ReleaseDate = "20 Jun 2004";
+my $Version = "2.2.0alpha";
+my $ReleaseDate = "15 Aug 2004";
my $DistDir = "dist/BackupPC-$Version";
my @PerlSrc = qw(
bin/BackupPC_zipCreate
bin/BackupPC_zcat
lib/BackupPC/Attrib.pm
+ lib/BackupPC/Config.pm
lib/BackupPC/FileZIO.pm
lib/BackupPC/Lib.pm
lib/BackupPC/PoolWrite.pm
+ lib/BackupPC/Storage.pm
lib/BackupPC/View.pm
lib/BackupPC/CGI/AdminOptions.pm
lib/BackupPC/CGI/Archive.pm
lib/BackupPC/CGI/ArchiveInfo.pm
lib/BackupPC/CGI/Browse.pm
lib/BackupPC/CGI/DirHistory.pm
+ lib/BackupPC/CGI/EditConfig.pm
lib/BackupPC/CGI/EmailSummary.pm
lib/BackupPC/CGI/GeneralInfo.pm
lib/BackupPC/CGI/HostInfo.pm
lib/BackupPC/CGI/StopServer.pm
lib/BackupPC/CGI/Summary.pm
lib/BackupPC/CGI/View.pm
+ lib/BackupPC/Config/Meta.pm
lib/BackupPC/Lang/de.pm
lib/BackupPC/Lang/en.pm
lib/BackupPC/Lang/es.pm
lib/BackupPC/Lang/fr.pm
lib/BackupPC/Lang/it.pm
lib/BackupPC/Lang/nl.pm
+ lib/BackupPC/Lang/pt_br.pm
+ lib/BackupPC/Storage/Text.pm
lib/BackupPC/Xfer/Archive.pm
lib/BackupPC/Xfer/Smb.pm
lib/BackupPC/Xfer/Tar.pm
foreach my $dir ( qw(bin doc conf images init.d/src cgi-bin
lib/BackupPC/CGI
+ lib/BackupPC/Config
lib/BackupPC/Lang
+ lib/BackupPC/Storage
lib/BackupPC/Xfer
lib/BackupPC/Zip
) ) {
init.d/src/gentoo-backuppc
init.d/src/gentoo-backuppc.conf
init.d/src/linux-backuppc
+ init.d/src/slackware-backuppc
init.d/src/solaris-backuppc
init.d/src/suse-backuppc
doc/BackupPC.pod
} else {
$vars->{$1}++;
}/eg;
- s/UserCommandRun\("([^"]*)"\)/if ( !defined($vars->{$1}) ) {
+ s/UserCommandRun\("([^"]*)"/if ( !defined($vars->{$1}) ) {
print("Unexpected Conf var $1 in $file\n");
$errors++;
} else {
open(F, $file) || die("can't open $file");
binmode(F);
while ( <F> ) {
+ next if ( /^\s*#/ );
s/\$Lang->{([^}]*)}/$vars->{$1} = 1;/eg;
}
close(F);