From e9453b7611be63303572ae443d5fb56b73364678 Mon Sep 17 00:00:00 2001 From: cbarratt Date: Fri, 13 Dec 2002 03:19:57 +0000 Subject: [PATCH] * Support for rsync and rsyncd. Changes to BackupPC_dump and new modules BackupPC::Xfer::Rsync and BackupPC::Xfer::RsyncFileIO. * Added new BackupPC::View module that creates views of backups (handling merging etc). Updated BackupPC_Admin, BackupPC_zipCreate and BackupPC_tarCreate to use BackupPC::View. This removes lots of merging and mangling code from the higher-level code. --- ChangeLog | 8 + bin/BackupPC | 15 +- bin/BackupPC_compressPool | 4 +- bin/BackupPC_dump | 382 ++++++++----- bin/BackupPC_link | 4 +- bin/BackupPC_nightly | 4 +- bin/BackupPC_restore | 4 +- bin/BackupPC_sendEmail | 4 +- bin/BackupPC_serverMesg | 4 +- bin/BackupPC_tarCreate | 200 +------ bin/BackupPC_tarExtract | 4 +- bin/BackupPC_trashClean | 4 +- bin/BackupPC_zcat | 4 +- bin/BackupPC_zipCreate | 192 +------ cgi-bin/BackupPC_Admin | 410 ++++++-------- conf/config.pl | 87 ++- configure.pl | 23 +- doc-src/BackupPC.pod | 92 +++- lib/BackupPC/Attrib.pm | 15 +- lib/BackupPC/FileZIO.pm | 2 +- lib/BackupPC/Lang/en.pm | 20 +- lib/BackupPC/Lang/fr.pm | 20 +- lib/BackupPC/Lib.pm | 18 +- lib/BackupPC/PoolWrite.pm | 2 +- lib/BackupPC/View.pm | 337 ++++++++++++ lib/BackupPC/Xfer/Rsync.pm | 353 ++++++++++++ lib/BackupPC/Xfer/RsyncFileIO.pm | 915 +++++++++++++++++++++++++++++++ lib/BackupPC/Xfer/Smb.pm | 27 +- lib/BackupPC/Xfer/Tar.pm | 17 +- lib/BackupPC/Zip/FileMember.pm | 2 +- makeDist | 27 +- 31 files changed, 2354 insertions(+), 846 deletions(-) create mode 100644 lib/BackupPC/View.pm create mode 100644 lib/BackupPC/Xfer/Rsync.pm create mode 100644 lib/BackupPC/Xfer/RsyncFileIO.pm diff --git a/ChangeLog b/ChangeLog index e5e0dbd..4a6f7f1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -21,6 +21,14 @@ # Version __VERSION__, __RELEASEDATE__ #------------------------------------------------------------------------ +* Support for rsync and rsyncd. 
Changes to BackupPC_dump and new + modules BackupPC::Xfer::Rsync and BackupPC::Xfer::RsyncFileIO. + +* Added new BackupPC::View module that creates views of backups + (handling merging etc). Updated BackupPC_Admin, BackupPC_zipCreate + and BackupPC_tarCreate to use BackupPC::View. This removes lots + of merging and mangling code from the higher-level code. + * Added internationalization (i18n) code from Xavier Nicollet. Voila! BackupPC_Admin now supports English and French, and adding more languages is now easy. diff --git a/bin/BackupPC b/bin/BackupPC index 5c17c2b..f8ca366 100755 --- a/bin/BackupPC +++ b/bin/BackupPC @@ -47,7 +47,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # @@ -55,7 +55,7 @@ use strict; use vars qw(%Status %Info $Hosts); -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; @@ -796,10 +796,11 @@ sub Main_Check_Job_Messages } else { print(LOG $bpc->timeStamp, "dhcp $host: $mesg\n"); } - } elsif ( $mesg =~ /^started (.*) dump, pid=(\d+), tarPid=(-?\d+)/ ) { - $Jobs{$host}{type} = $1; - $Jobs{$host}{xferPid} = $2; - $Jobs{$host}{tarPid} = $3; + } elsif ( $mesg =~ /^started (.*) dump, pid=(-?\d+), tarPid=(-?\d+), share=(.*)/ ) { + $Jobs{$host}{type} = $1; + $Jobs{$host}{xferPid} = $2; + $Jobs{$host}{tarPid} = $3; + $Jobs{$host}{shareName} = $4; print(LOG $bpc->timeStamp, "Started $1 backup on $host" . " (pid=$Jobs{$host}{pid}, xferPid=$2", @@ -807,7 +808,7 @@ sub Main_Check_Job_Messages ? ", tarPid=$Jobs{$host}{tarPid}" : "", $Jobs{$host}{dhcpHostIP} ? 
", dhcp=$Jobs{$host}{dhcpHostIP}" : "", - ")\n"); + ", share=$Jobs{$host}{shareName})\n"); $Status{$host}{state} = "backup in progress"; $Status{$host}{reason} = ""; $Status{$host}{type} = $1; diff --git a/bin/BackupPC_compressPool b/bin/BackupPC_compressPool index e243837..a3d3583 100755 --- a/bin/BackupPC_compressPool +++ b/bin/BackupPC_compressPool @@ -49,7 +49,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # @@ -61,7 +61,7 @@ use File::Find; use File::Path; use Compress::Zlib; use Getopt::Std; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; diff --git a/bin/BackupPC_dump b/bin/BackupPC_dump index 4d917f5..7fe11fe 100755 --- a/bin/BackupPC_dump +++ b/bin/BackupPC_dump @@ -28,11 +28,11 @@ # full or incremental backup needs to be run. If no backup is # scheduled, or a ping to $host fails, then BackupPC_dump quits. # -# The backup is done using smbclient, extracting the dump into -# $TopDir/pc/$host/new. The smbclient output is put into -# $TopDir/pc/$host/XferLOG. +# The backup is done using the selected XferMethod (smb, tar, rsync etc), +# extracting the dump into $TopDir/pc/$host/new. The xfer output is +# put into $TopDir/pc/$host/XferLOG. # -# If the dump succeeds (based on parsing the output of smbclient): +# If the dump succeeds (based on parsing the output of the XferMethod): # - $TopDir/pc/$host/new is renamed to $TopDir/pc/$host/nnn, where # nnn is the next sequential dump number. # - $TopDir/pc/$host/XferLOG is renamed to $TopDir/pc/$host/XferLOG.nnn. @@ -67,19 +67,19 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. 
# #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; use BackupPC::Xfer::Smb; use BackupPC::Xfer::Tar; - +use BackupPC::Xfer::Rsync; use File::Path; use Getopt::Std; @@ -129,7 +129,10 @@ my $tarPid = -1; # # Re-read config file, so we can include the PC-specific config # -$bpc->ConfigRead($host); +if ( defined(my $error = $bpc->ConfigRead($host)) ) { + print("Can't read PC's config file: $error\n"); + exit(1); +} %Conf = $bpc->Conf(); # @@ -191,10 +194,15 @@ if ( $opts{d} ) { print("DHCP $hostIP $host\n"); } -my($needLink, @Backups, $type); +my($needLink, @Backups, $type, $lastBkupNum, $lastFullBkupNum); my $lastFull = 0; my $lastIncr = 0; +if ( $Conf{FullPeriod} == -1 && !$opts{f} && !$opts{i} + || $Conf{FullPeriod} == -2 ) { + NothingToDo($needLink); +} + if ( !$opts{i} && !$opts{f} && $Conf{BlackoutGoodCnt} >= 0 && $StatusHost{aliveCnt} >= $Conf{BlackoutGoodCnt} ) { my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time); @@ -202,20 +210,16 @@ if ( !$opts{i} && !$opts{f} && $Conf{BlackoutGoodCnt} >= 0 if ( $Conf{BlackoutHourBegin} <= $currHours && $currHours <= $Conf{BlackoutHourEnd} && grep($_ == $wday, @{$Conf{BlackoutWeekDays}}) ) { - print(LOG $bpc->timeStamp, "skipping because of blackout" - . " (alive $StatusHost{aliveCnt} times)\n"); - print("nothing to do\n"); - print("link $host\n") if ( $needLink ); - exit(1); +# print(LOG $bpc->timeStamp, "skipping because of blackout" +# . 
" (alive $StatusHost{aliveCnt} times)\n"); + NothingToDo($needLink); } } if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) { printf(LOG "%sskipping because of user requested delay (%.1f hours left)", $bpc->timeStamp, ($StatusHost{backoffTime} - time) / 3600); - print("nothing to do\n"); - print("link $host\n") if ( $needLink ); - exit(1); + NothingToDo($needLink); } # @@ -231,9 +235,12 @@ BackupExpire($host); for ( my $i = 0 ; $i < @Backups ; $i++ ) { $needLink = 1 if ( $Backups[$i]{nFilesNew} eq "" || -f "$Dir/NewFileList.$Backups[$i]{num}" ); + $lastBkupNum = $Backups[$i]{num}; if ( $Backups[$i]{type} eq "full" ) { - $lastFull = $Backups[$i]{startTime} - if ( $lastFull < $Backups[$i]{startTime} ); + if ( $lastFull < $Backups[$i]{startTime} ) { + $lastFull = $Backups[$i]{startTime}; + $lastFullBkupNum = $Backups[$i]{num}; + } } else { $lastIncr = $Backups[$i]{startTime} if ( $lastIncr < $Backups[$i]{startTime} ); @@ -252,9 +259,7 @@ if ( @Backups == 0 && time - $lastFull > $Conf{IncrPeriod} * 24*3600) ) { $type = "incr"; } else { - print("nothing to do\n"); - print("link $host\n") if ( $needLink ); - exit(0); + NothingToDo($needLink); } # @@ -314,9 +319,12 @@ my $sizeExistComp = 0; my $nFilesTotal = 0; my $sizeTotal = 0; my($logMsg, %stat, $xfer, $ShareNames); +my $newFilesFH; if ( $Conf{XferMethod} eq "tar" ) { $ShareNames = $Conf{TarShareName}; +} elsif ( $Conf{XferMethod} eq "rsync" || $Conf{XferMethod} eq "rsyncd" ) { + $ShareNames = $Conf{RsyncShareName}; } else { $ShareNames = $Conf{SmbShareName}; } @@ -337,143 +345,197 @@ for my $shareName ( @$ShareNames ) { next; } - # - # Create a pipe to connect smbclient to BackupPC_tarExtract - # WH is the write handle for writing, provided to the transport - # program, and RH is the other end of the pipe for reading, - # provided to BackupPC_tarExtract. - # - pipe(RH, WH); - - # - # fork a child for BackupPC_tarExtract. 
TAR is a file handle - # on which we (the parent) read the stdout & stderr from - # BackupPC_tarExtract. - # - if ( !defined($tarPid = open(TAR, "-|")) ) { - print(LOG $bpc->timeStamp, "can't fork to run tar\n"); - print("can't fork to run tar\n"); - close(RH); - close(WH); - last; - } - if ( !$tarPid ) { - # - # This is the tar child. Close the write end of the pipe, - # clone STDERR to STDOUT, clone STDIN from RH, and then - # exec BackupPC_tarExtract. - # - setpgrp 0,0; - close(WH); - close(STDERR); - open(STDERR, ">&STDOUT"); - close(STDIN); - open(STDIN, "<&RH"); - exec("$BinDir/BackupPC_tarExtract '$host' '$shareName'" - . " $Conf{CompressLevel}"); - print(LOG $bpc->timeStamp, "can't exec $BinDir/BackupPC_tarExtract\n"); - exit(0); - } - - # - # Run the transport program - # - my $xferArgs = { - host => $host, - hostIP => $hostIP, - shareName => $shareName, - pipeRH => *RH, - pipeWH => *WH, - XferLOG => $XferLOG, - outDir => $Dir, - type => $type, - lastFull => $lastFull, - }; if ( $Conf{XferMethod} eq "tar" ) { # # Use tar (eg: tar/ssh) as the transport program. # - $xfer = BackupPC::Xfer::Tar->new($bpc, $xferArgs); + $xfer = BackupPC::Xfer::Tar->new($bpc); + } elsif ( $Conf{XferMethod} eq "rsync" || $Conf{XferMethod} eq "rsyncd" ) { + # + # Use rsync as the transport program. + # + if ( !defined($xfer = BackupPC::Xfer::Rsync->new($bpc)) ) { + print(LOG $bpc->timeStamp, + "dump failed: File::RsyncP module is not installed\n"); + print("dump failed: Rsync module is not installed\n"); + exit(1); + } } else { # # Default is to use smbclient (smb) as the transport program. # - $xfer = BackupPC::Xfer::Smb->new($bpc, $xferArgs); + $xfer = BackupPC::Xfer::Smb->new($bpc); } + my $useTar = $xfer->useTar; + + if ( $useTar ) { + # + # This xfer method outputs a tar format file, so we start a + # BackupPC_tarExtract to extract the data. 
+ # + # Create a pipe to connect the Xfer method to BackupPC_tarExtract + # WH is the write handle for writing, provided to the transport + # program, and RH is the other end of the pipe for reading, + # provided to BackupPC_tarExtract. + # + pipe(RH, WH); + + # + # fork a child for BackupPC_tarExtract. TAR is a file handle + # on which we (the parent) read the stdout & stderr from + # BackupPC_tarExtract. + # + if ( !defined($tarPid = open(TAR, "-|")) ) { + print(LOG $bpc->timeStamp, "can't fork to run tar\n"); + print("can't fork to run tar\n"); + close(RH); + close(WH); + last; + } + if ( !$tarPid ) { + # + # This is the tar child. Close the write end of the pipe, + # clone STDERR to STDOUT, clone STDIN from RH, and then + # exec BackupPC_tarExtract. + # + setpgrp 0,0; + close(WH); + close(STDERR); + open(STDERR, ">&STDOUT"); + close(STDIN); + open(STDIN, "<&RH"); + exec("$BinDir/BackupPC_tarExtract '$host' '$shareName'" + . " $Conf{CompressLevel}"); + print(LOG $bpc->timeStamp, + "can't exec $BinDir/BackupPC_tarExtract\n"); + exit(0); + } + } elsif ( !defined($newFilesFH) ) { + # + # We need to create the NewFileList output file + # + local(*NEW_FILES); + open(NEW_FILES, ">$TopDir/pc/$host/NewFileList") + || die("can't open $TopDir/pc/$host/NewFileList"); + $newFilesFH = *NEW_FILES; + } + + # + # Run the transport program + # + $xfer->args({ + host => $host, + hostIP => $hostIP, + shareName => $shareName, + pipeRH => *RH, + pipeWH => *WH, + XferLOG => $XferLOG, + newFilesFH => $newFilesFH, + outDir => $Dir, + type => $type, + lastFull => $lastFull, + lastBkupNum => $lastBkupNum, + lastFullBkupNum => $lastFullBkupNum, + backups => \@Backups, + compress => $Conf{CompressLevel}, + XferMethod => => $Conf{XferMethod}, + }); + if ( !defined($logMsg = $xfer->start()) ) { - print(LOG $bpc->timeStamp, $xfer->errStr, "\n"); - print($xfer->errStr, "\n"); + print(LOG $bpc->timeStamp, "xfer start failed: ", $xfer->errStr, "\n"); + print("dump failed: ", $xfer->errStr, 
"\n"); print("link $host\n") if ( $needLink ); # # kill off the tar process, first nicely then forcefully # - kill(2, $tarPid); - sleep(1); - kill(9, $tarPid); + if ( $tarPid > 0 ) { + kill(2, $tarPid); + sleep(1); + kill(9, $tarPid); + } exit(1); } - # - # The parent must close both handles on the pipe since the children - # are using these handles now. - # - close(RH); - close(WH); - $xferPid = $xfer->xferPid; - print(LOG $bpc->timeStamp, $logMsg, - " (xferPid=$xferPid, tarPid=$tarPid)\n"); - print("started $type dump, pid=$xferPid, tarPid=$tarPid\n"); - # - # Parse the output of the transfer program and BackupPC_tarExtract - # while they run. Since we are reading from two or more children - # we use a select. - # - my($FDread, $tarOut, $mesg); - vec($FDread, fileno(TAR), 1) = 1; - $xfer->setSelectMask(\$FDread); - - SCAN: while ( 1 ) { - my $ein = $FDread; - last if ( $FDread =~ /^\0*$/ ); - select(my $rout = $FDread, undef, $ein, undef); - if ( vec($rout, fileno(TAR), 1) ) { - if ( sysread(TAR, $mesg, 8192) <= 0 ) { - vec($FDread, fileno(TAR), 1) = 0; - close(TAR); - } else { - $tarOut .= $mesg; - } - } - while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) { - $_ = $1; - $tarOut = $2; - $XferLOG->write(\"tarExtract: $_\n"); - if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) { - $tarErrs = $1; - $nFilesExist = $2; - $sizeExist = $3; - $sizeExistComp = $4; - $nFilesTotal = $5; - $sizeTotal = $6; - } - } - last if ( !$xfer->readOutput(\$FDread, $rout) ); - while ( my $str = $xfer->logMsgGet ) { - print(LOG $bpc->timeStamp, "xfer: $str\n"); - } - if ( $xfer->getStats->{fileCnt} == 1 ) { - # - # Make sure it is still the machine we expect. We do this while - # the transfer is running to avoid a potential race condition if - # the ip address was reassigned by dhcp just before we started - # the transfer. 
- # - if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) { - $stat{hostError} = $errMsg; - last SCAN; - } - } + $xferPid = $xfer->xferPid; + if ( $useTar ) { + # + # The parent must close both handles on the pipe since the children + # are using these handles now. + # + close(RH); + close(WH); + print(LOG $bpc->timeStamp, $logMsg, + " (xferPid=$xferPid, tarPid=$tarPid)\n"); + } elsif ( $xferPid > 0 ) { + print(LOG $bpc->timeStamp, $logMsg, " (xferPid=$xferPid)\n"); + } else { + print(LOG $bpc->timeStamp, $logMsg, "\n"); } + print("started $type dump, pid=$xferPid, tarPid=$tarPid," + . " share=$shareName\n"); + + if ( $useTar || $xferPid > 0 ) { + # + # Parse the output of the transfer program and BackupPC_tarExtract + # while they run. Since we might be reading from two or more children + # we use a select. + # + my($FDread, $tarOut, $mesg); + vec($FDread, fileno(TAR), 1) = 1 if ( $useTar ); + $xfer->setSelectMask(\$FDread); + + SCAN: while ( 1 ) { + my $ein = $FDread; + last if ( $FDread =~ /^\0*$/ ); + select(my $rout = $FDread, undef, $ein, undef); + if ( $useTar ) { + if ( vec($rout, fileno(TAR), 1) ) { + if ( sysread(TAR, $mesg, 8192) <= 0 ) { + vec($FDread, fileno(TAR), 1) = 0; + close(TAR); + } else { + $tarOut .= $mesg; + } + } + while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) { + $_ = $1; + $tarOut = $2; + $XferLOG->write(\"tarExtract: $_\n"); + if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) { + $tarErrs = $1; + $nFilesExist = $2; + $sizeExist = $3; + $sizeExistComp = $4; + $nFilesTotal = $5; + $sizeTotal = $6; + } + } + } + last if ( !$xfer->readOutput(\$FDread, $rout) ); + while ( my $str = $xfer->logMsgGet ) { + print(LOG $bpc->timeStamp, "xfer: $str\n"); + } + if ( $xfer->getStats->{fileCnt} == 1 ) { + # + # Make sure it is still the machine we expect. 
We do this while + # the transfer is running to avoid a potential race condition if + # the ip address was reassigned by dhcp just before we started + # the transfer. + # + if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) { + $stat{hostError} = $errMsg; + last SCAN; + } + } + } + } else { + # + # otherwise the xfer module does everything for us + # + ($tarErrs, $nFilesExist, $sizeExist, $sizeExistComp, + $nFilesTotal, $sizeTotal) = $xfer->run(); + } + # # Merge the xfer status (need to accumulate counts) # @@ -496,15 +558,19 @@ for my $shareName ( @$ShareNames ) { # # kill off the tranfer program, first nicely then forcefully # - kill(2, $xferPid); - sleep(1); - kill(9, $xferPid); + if ( $xferPid > 0 ) { + kill(2, $xferPid); + sleep(1); + kill(9, $xferPid); + } # # kill off the tar process, first nicely then forcefully # - kill(2, $tarPid); - sleep(1); - kill(9, $tarPid); + if ( $tarPid > 0 ) { + kill(2, $tarPid); + sleep(1); + kill(9, $tarPid); + } # # don't do any more shares on this host # @@ -512,6 +578,7 @@ for my $shareName ( @$ShareNames ) { } } $XferLOG->close(); +close($newFilesFH) if ( defined($newFilesFH) ); my $lastNum = -1; @@ -641,11 +708,23 @@ print("$type backup complete\n"); # Subroutines ########################################################################### +sub NothingToDo +{ + my($needLink) = @_; + + print("nothing to do\n"); + print("link $host\n") if ( $needLink ); + exit(0); +} + sub catch_signal { my $signame = shift; + my $fileExt = $Conf{CompressLevel} > 0 ? 
".z" : ""; print(LOG $bpc->timeStamp, "cleaning up after signal $signame\n"); + $XferLOG->write(\"exiting after signal $signame\n"); + $XferLOG->close(); if ( $xferPid > 0 ) { if ( kill(2, $xferPid) <= 0 ) { sleep(1); @@ -660,8 +739,15 @@ sub catch_signal } unlink("$Dir/timeStamp.level0"); unlink("$Dir/NewFileList"); + unlink("$Dir/XferLOG.bad"); + unlink("$Dir/XferLOG.bad$fileExt"); + rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt"); $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" ); - print("exiting after signal $signame\n"); + if ( $signame eq "INT" ) { + print("dump failed: aborted by user (signal=$signame)\n"); + } else { + print("dump failed: received signal=$signame\n"); + } print("link $host\n") if ( $needLink ); exit(1); } diff --git a/bin/BackupPC_link b/bin/BackupPC_link index 85a9388..b8f7481 100755 --- a/bin/BackupPC_link +++ b/bin/BackupPC_link @@ -39,14 +39,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::Attrib; use BackupPC::PoolWrite; diff --git a/bin/BackupPC_nightly b/bin/BackupPC_nightly index 7279536..b1a06ae 100755 --- a/bin/BackupPC_nightly +++ b/bin/BackupPC_nightly @@ -35,14 +35,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. 
# #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; diff --git a/bin/BackupPC_restore b/bin/BackupPC_restore index 54a7a86..5104dd7 100755 --- a/bin/BackupPC_restore +++ b/bin/BackupPC_restore @@ -29,14 +29,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; use BackupPC::Xfer::Smb; diff --git a/bin/BackupPC_sendEmail b/bin/BackupPC_sendEmail index 21c0c93..2a7184c 100755 --- a/bin/BackupPC_sendEmail +++ b/bin/BackupPC_sendEmail @@ -31,14 +31,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; diff --git a/bin/BackupPC_serverMesg b/bin/BackupPC_serverMesg index a12e45a..325e5d1 100755 --- a/bin/BackupPC_serverMesg +++ b/bin/BackupPC_serverMesg @@ -43,14 +43,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. 
# #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; diff --git a/bin/BackupPC_tarCreate b/bin/BackupPC_tarCreate index 844ab97..04713df 100755 --- a/bin/BackupPC_tarCreate +++ b/bin/BackupPC_tarCreate @@ -48,19 +48,20 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use File::Path; use Getopt::Std; use BackupPC::Lib; use BackupPC::Attrib qw(:all); use BackupPC::FileZIO; +use BackupPC::View; die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); my $TopDir = $bpc->TopDir(); @@ -90,30 +91,15 @@ if ( $opts{n} !~ /^(\d+)$/ ) { my $Num = $opts{n}; my @Backups = $bpc->BackupInfoRead($Host); -my($Compress, $Mangle, $CompressF, $MangleF, $NumF, $i); my $FileCnt = 0; my $ByteCnt = 0; my $DirCnt = 0; my $SpecialCnt = 0; my $ErrorCnt = 0; +my $i; for ( $i = 0 ; $i < @Backups ; $i++ ) { - if ( !$Backups[$i]{noFill} ) { - # - # Remember the most recent filled backup - # - $NumF = $Backups[$i]{num}; - $MangleF = $Backups[$i]{mangle}; - $CompressF = $Backups[$i]{compress}; - } - next if ( $Backups[$i]{num} != $Num ); - $Compress = $Backups[$i]{compress}; - $Mangle = $Backups[$i]{mangle}; - if ( !$Backups[$i]{noFill} ) { - # no need to back-fill a filled backup - $NumF = $MangleF = $CompressF = undef; - } - last; + last if ( $Backups[$i]{num} == $Num ); } if ( $i >= @Backups ) { print(STDERR "$0: bad backup number $Num for host $Host\n"); @@ -126,11 +112,7 @@ if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ ) { print(STDERR "$0: bad share name '$opts{s}'\n"); exit(1); } -my $ShareNameOrig = $opts{s}; -my $ShareName = $Mangle 
? $bpc->fileNameEltMangle($ShareNameOrig) - : $ShareNameOrig; -my $ShareNameF = $MangleF ? $bpc->fileNameEltMangle($ShareNameOrig) - : $ShareNameOrig; +my $ShareName = $opts{s}; # # This constant and the line of code below that uses it are borrowed @@ -196,37 +178,16 @@ exit(0); sub archiveWrite { my($fh, $dir, $tarPathOverride) = @_; - if ( $dir =~ m{(^|/)\.\.(/|$)} || $dir !~ /^(.*)$/ ) { + + my $view = BackupPC::View->new($bpc, $Host, \@Backups); + + if ( $dir =~ m{(^|/)\.\.(/|$)} ) { print(STDERR "$0: bad directory '$dir'\n"); $ErrorCnt++; - next; - } - (my $DirOrig = $1) =~ s{/+$}{}; - $DirOrig =~ s{^\.?/+}{}; - my($Dir, $DirF, $FullPath, $FullPathF); - if ( $DirOrig eq "" ) { - $Dir = $DirF = ""; - $FullPath = "$TopDir/pc/$Host/$Num/$ShareName"; - $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF" - if ( defined($NumF) ); - } else { - $Dir = $Mangle ? $bpc->fileNameMangle($DirOrig) : $DirOrig; - $DirF = $MangleF ? $bpc->fileNameMangle($DirOrig) : $DirOrig; - $FullPath = "$TopDir/pc/$Host/$Num/$ShareName/$Dir"; - $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF/$DirF" - if ( defined($NumF) ); - } - if ( -f $FullPath ) { - TarWriteFile($fh, $FullPath, $Mangle, $Compress, $tarPathOverride); - } elsif ( -d $FullPath || (defined($NumF) && -d $FullPathF) ) { - MergeFind($fh, $FullPath, $FullPathF); - } elsif ( defined($NumF) && -f $FullPathF ) { - TarWriteFile($fh, $FullPathF, $MangleF, $CompressF, $tarPathOverride); - } else { - print(STDERR "$0: $Host, backup $Num, doesn't have a directory or file" - . 
" $ShareNameOrig/$DirOrig\n"); - $ErrorCnt++; + return; } + $view->find($Num, $ShareName, $dir, 0, \&TarWriteFile, + $fh, $tarPathOverride); } sub UidLookup @@ -360,63 +321,11 @@ my $AttrDir; sub TarWriteFile { - my($fh, $fullName, $mangle, $compress, $tarPathOverride) = @_; - my($tarPath); + my($hdr, $fh, $tarPathOverride) = @_; - if ( $fullName =~ m{^\Q$TopDir/pc/$Host/$Num/$ShareName\E(.*)} - || (defined($NumF) - && $fullName =~ m{^\Q$TopDir/pc/$Host/$NumF/$ShareNameF\E(.*)}) ) { - $tarPath = $mangle ? $bpc->fileNameUnmangle($1) : $1; - } else { - print(STDERR "Unexpected file name from find: $fullName\n"); - return; - } + my $tarPath = $hdr->{relPath}; $tarPath = $tarPathOverride if ( defined($tarPathOverride) ); - (my $dir = $fullName) =~ s{/([^/]*)$}{}; - my $fileName = $mangle ? $bpc->fileNameUnmangle($1) : $1; - if ( $mangle && $AttrDir ne $dir ) { - $AttrDir = $dir; - $Attr = BackupPC::Attrib->new({ compress => $compress }); - if ( -f $Attr->fileName($dir) && !$Attr->read($dir) ) { - print(STDERR "Can't read attribute file in $dir\n"); - $ErrorCnt++; - $Attr = undef; - } - } - my $hdr = $Attr->get($fileName) if ( defined($Attr) ); - if ( !defined($hdr) ) { - # - # No attributes. Must be an old style backup. Reconstruct - # what we can. Painful part is computing the size if compression - # is on: only method is to uncompress the file. - # - my @s = stat($fullName); - $hdr = { - type => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE, - mode => $s[2], - uid => $s[4], - gid => $s[5], - size => -f _ ? 
$s[7] : 0, - mtime => $s[9], - }; - if ( $compress && -f _ ) { - # - # Compute the correct size by reading the whole file - # - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); - if ( !defined($f) ) { - print(STDERR "Unable to open file $fullName\n"); - $ErrorCnt++; - return; - } - my($data, $size); - while ( $f->read(\$data, $BufSize) > 0 ) { - $size += length($data); - } - $f->close; - $hdr->{size} = $size; - } - } + if ( defined($PathRemove) && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) { substr($tarPath, 0, length($PathRemove)) = $PathAdd; @@ -436,9 +345,9 @@ sub TarWriteFile # # Regular file: write the header and file # - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); + my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress}); if ( !defined($f) ) { - print(STDERR "Unable to open file $fullName\n"); + print(STDERR "Unable to open file $hdr->{fullPath}\n"); $ErrorCnt++; return; } @@ -460,9 +369,9 @@ sub TarWriteFile # # Start by reading the contents of the link. # - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); + my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress}); if ( !defined($f) ) { - print(STDERR "Unable to open file $fullName\n"); + print(STDERR "Unable to open file $hdr->{fullPath}\n"); $ErrorCnt++; return; } @@ -508,9 +417,9 @@ sub TarWriteFile # Symbolic link: read the symbolic link contents into the header # and write the header. # - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); + my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress}); if ( !defined($f) ) { - print(STDERR "Unable to open symlink file $fullName\n"); + print(STDERR "Unable to open symlink file $hdr->{fullPath}\n"); $ErrorCnt++; return; } @@ -530,11 +439,12 @@ sub TarWriteFile # major and minor numbers from a plain file. 
# if ( $hdr->{type} != BPC_FTYPE_FIFO ) { - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); + my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, + $hdr->{compress}); my $data; if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) { print(STDERR "Unable to open/read char/block special file" - . " $fullName\n"); + . " $hdr->{fullPath}\n"); $f->close if ( defined($f) ); $ErrorCnt++; return; @@ -553,63 +463,3 @@ sub TarWriteFile $ErrorCnt++; } } - -# -# Does a recursive find of $dir, filling in from the (filled dump) -# directory $dirF. Handles the cases where $dir and $dirF might -# or might not be mangled etc. -# -sub MergeFind -{ - my($fh, $dir, $dirF) = @_; - - my(@Dir, $fLast); - if ( -d $dir ) { - TarWriteFile($fh, $dir, $Mangle, $Compress); - } elsif ( -d $dirF ) { - TarWriteFile($fh, $dirF, $MangleF, $CompressF); - } - if ( opendir(DIR, $dir) ) { - @Dir = readdir(DIR); - closedir(DIR); - } - if ( defined($NumF) && opendir(DIR, $dirF) ) { - if ( $Mangle == $MangleF ) { - @Dir = (@Dir, readdir(DIR)); - } else { - foreach my $f ( readdir(DIR) ) { - if ( $Mangle ) { - push(@Dir, $bpc->fileNameMangle($f)); - } else { - push(@Dir, $bpc->fileNameUnmangle($f)); - } - } - } - } - foreach my $f ( sort({$a cmp $b} @Dir) ) { - next if ( $f eq "." || $f eq ".." - || $f eq $fLast || ($Mangle && $f eq "attrib") ); - $fLast = $f; - my($fF) = $f; - if ( $Mangle != $MangleF ) { - $fF = $Mangle ? 
$bpc->fileNameUnmangle($f) - : $bpc->fileNameMangle($f); - } - if ( -e "$dir/$f" ) { - if ( -d "$dir/$f" ) { - MergeFind($fh, "$dir/$f", "$dirF/$fF"); - } else { - TarWriteFile($fh, "$dir/$f", $Mangle, $Compress); - } - } elsif ( -e "$dirF/$fF" ) { - if ( -d "$dirF/$fF" ) { - MergeFind($fh, "$dir/$f", "$dirF/$fF"); - } else { - TarWriteFile($fh, "$dirF/$fF", $MangleF, $CompressF); - } - } else { - print(STDERR "$0: Botch on $dir, $dirF, $f, $fF\n"); - $ErrorCnt++; - } - } -} diff --git a/bin/BackupPC_tarExtract b/bin/BackupPC_tarExtract index 2ba25bf..06eaf73 100755 --- a/bin/BackupPC_tarExtract +++ b/bin/BackupPC_tarExtract @@ -27,14 +27,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::Attrib qw(:all); use BackupPC::FileZIO; diff --git a/bin/BackupPC_trashClean b/bin/BackupPC_trashClean index d266327..08815d2 100755 --- a/bin/BackupPC_trashClean +++ b/bin/BackupPC_trashClean @@ -29,14 +29,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); diff --git a/bin/BackupPC_zcat b/bin/BackupPC_zcat index 71e2082..284988a 100755 --- a/bin/BackupPC_zcat +++ b/bin/BackupPC_zcat @@ -32,7 +32,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. 
+# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # @@ -40,7 +40,7 @@ use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use Compress::Zlib; use BackupPC::FileZIO; diff --git a/bin/BackupPC_zipCreate b/bin/BackupPC_zipCreate index d51a21f..4fa7f4a 100755 --- a/bin/BackupPC_zipCreate +++ b/bin/BackupPC_zipCreate @@ -51,14 +51,14 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # #======================================================================== use strict; -use lib "__INSTALLDIR__/lib"; +use lib "/usr/local/BackupPC/lib"; use Archive::Zip qw(:ERROR_CODES); use File::Path; use Getopt::Std; @@ -67,6 +67,7 @@ use BackupPC::Lib; use BackupPC::Attrib qw(:all); use BackupPC::FileZIO; use BackupPC::Zip::FileMember; +use BackupPC::View; die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); my $TopDir = $bpc->TopDir(); @@ -94,7 +95,6 @@ if ( $opts{n} !~ /^(\d+)$/ ) { exit(1); } my $Num = $opts{n}; - $opts{c} = 0 if ( $opts{c} eq "" ); if ( $opts{c} !~ /^(\d+)$/ ) { print(STDERR "$0: invalid compression level '$opts{c}'. 
0=none, 9=max\n"); @@ -103,7 +103,7 @@ if ( $opts{c} !~ /^(\d+)$/ ) { my $compLevel = $opts{c}; my @Backups = $bpc->BackupInfoRead($Host); -my($Compress, $Mangle, $CompressF, $MangleF, $NumF, $i); +my($i); my $FileCnt = 0; my $ByteCnt = 0; my $DirCnt = 0; @@ -111,22 +111,7 @@ my $SpecialCnt = 0; my $ErrorCnt = 0; for ( $i = 0 ; $i < @Backups ; $i++ ) { - if ( !$Backups[$i]{noFill} ) { - # - # Remember the most recent filled backup - # - $NumF = $Backups[$i]{num}; - $MangleF = $Backups[$i]{mangle}; - $CompressF = $Backups[$i]{compress}; - } - next if ( $Backups[$i]{num} != $Num ); - $Compress = $Backups[$i]{compress}; - $Mangle = $Backups[$i]{mangle}; - if ( !$Backups[$i]{noFill} ) { - # no need to back-fill a filled backup - $NumF = $MangleF = $CompressF = undef; - } - last; + last if ( $Backups[$i]{num} == $Num ); } if ( $i >= @Backups ) { print(STDERR "$0: bad backup number $Num for host $Host\n"); @@ -139,11 +124,7 @@ if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ ) { print(STDERR "$0: bad share name '$opts{s}'\n"); exit(1); } -my $ShareNameOrig = $opts{s}; -my $ShareName = $Mangle ? $bpc->fileNameEltMangle($ShareNameOrig) - : $ShareNameOrig; -my $ShareNameF = $MangleF ? $bpc->fileNameEltMangle($ShareNameOrig) - : $ShareNameOrig; +my $ShareName = $opts{s}; my $BufSize = 1048576; # 1MB or 2^20 my(%UidCache, %GidCache); @@ -159,38 +140,16 @@ foreach my $dir ( @ARGV ) { sub archiveWrite { my($zipfh, $dir, $zipPathOverride) = @_; + + my $view = BackupPC::View->new($bpc, $Host, \@Backups); + if ( $dir =~ m{(^|/)\.\.(/|$)} || $dir !~ /^(.*)$/ ) { print(STDERR "$0: bad directory '$dir'\n"); $ErrorCnt++; - next; - } - (my $DirOrig = $1) =~ s{/+$}{}; - $DirOrig =~ s{^\.?/+}{}; - my($Dir, $DirF, $FullPath, $FullPathF); - if ( $DirOrig eq "" ) { - $Dir = $DirF = ""; - $FullPath = "$TopDir/pc/$Host/$Num/$ShareName"; - $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF" - if ( defined($NumF) ); - } else { - $Dir = $Mangle ? 
$bpc->fileNameMangle($DirOrig) : $DirOrig; - $DirF = $MangleF ? $bpc->fileNameMangle($DirOrig) : $DirOrig; - $FullPath = "$TopDir/pc/$Host/$Num/$ShareName/$Dir"; - $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF/$DirF" - if ( defined($NumF) ); - } - if ( -f $FullPath ) { - ZipWriteFile($zipfh, $FullPath, $Mangle, $Compress, $zipPathOverride); - } elsif ( -d $FullPath || (defined($NumF) && -d $FullPathF) ) { - MergeFind($zipfh, $FullPath, $FullPathF); - } elsif ( defined($NumF) && -f $FullPathF ) { - ZipWriteFile($zipfh, $FullPathF, $MangleF, $CompressF, - $zipPathOverride); - } else { - print(STDERR "$0: $Host, backup $Num, doesn't have a directory or file" - . " $ShareNameOrig/$DirOrig\n"); - $ErrorCnt++; + return; } + $view->find($Num, $ShareName, $dir, 0, \&ZipWriteFile, + $zipfh, $zipPathOverride); } # Create Zip file @@ -231,63 +190,11 @@ my $AttrDir; sub ZipWriteFile { - my($zipfh, $fullName, $mangle, $compress, $zipPathOverride) = @_; - my($tarPath); + my($hdr, $zipfh, $zipPathOverride) = @_; - if ( $fullName =~ m{^\Q$TopDir/pc/$Host/$Num/$ShareName\E(.*)} - || (defined($NumF) - && $fullName =~ m{^\Q$TopDir/pc/$Host/$NumF/$ShareNameF\E(.*)}) ) { - $tarPath = $mangle ? $bpc->fileNameUnmangle($1) : $1; - } else { - print(STDERR "Unexpected file name from find: $fullName\n"); - return; - } + my $tarPath = $hdr->{relPath}; $tarPath = $zipPathOverride if ( defined($zipPathOverride) ); - (my $dir = $fullName) =~ s{/([^/]*)$}{}; - my $fileName = $mangle ? $bpc->fileNameUnmangle($1) : $1; - if ( $mangle && $AttrDir ne $dir ) { - $AttrDir = $dir; - $Attr = BackupPC::Attrib->new({ compress => $compress }); - if ( -f $Attr->fileName($dir) && !$Attr->read($dir) ) { - print(STDERR "Can't read attribute file in $dir\n"); - $ErrorCnt++; - $Attr = undef; - } - } - my $hdr = $Attr->get($fileName) if ( defined($Attr) ); - if ( !defined($hdr) ) { - # - # No attributes. Must be an old style backup. Reconstruct - # what we can. 
Painful part is computing the size if compression - # is on: only method is to uncompress the file. - # - my @s = stat($fullName); - $hdr = { - type => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE, - mode => $s[2], - uid => $s[4], - gid => $s[5], - size => -f _ ? $s[7] : 0, - mtime => $s[9], - }; - if ( $compress && -f _ ) { - # - # Compute the correct size by reading the whole file - # - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); - if ( !defined($f) ) { - print(STDERR "Unable to open file $fullName\n"); - $ErrorCnt++; - return; - } - my($data, $size); - while ( $f->read(\$data, $BufSize) > 0 ) { - $size += length($data); - } - $f->close; - $hdr->{size} = $size; - } - } + if ( defined($PathRemove) && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) { substr($tarPath, 0, length($PathRemove)) = $PathAdd; @@ -295,6 +202,7 @@ sub ZipWriteFile $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// ); $tarPath =~ s{//+}{/}g; $hdr->{name} = $tarPath; + my $zipmember; # Container to hold the file/directory to zip. if ( $hdr->{type} == BPC_FTYPE_DIR ) { @@ -309,10 +217,10 @@ sub ZipWriteFile # Regular file: write the header and file # $zipmember = BackupPC::Zip::FileMember->newFromFileNamed( - $fullName, + $hdr->{fullPath}, $hdr->{name}, $hdr->{size}, - $compress + $hdr->{compress} ); $FileCnt++; $ByteCnt += $hdr->{size}; @@ -323,9 +231,9 @@ sub ZipWriteFile # # Start by reading the contents of the link. # - my $f = BackupPC::FileZIO->open($fullName, 0, $compress); + my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress}); if ( !defined($f) ) { - print(STDERR "Unable to open file $fullName\n"); + print(STDERR "Unable to open file $hdr->{fullPath}\n"); $ErrorCnt++; return; } @@ -375,63 +283,3 @@ sub ZipWriteFile # Finally Zip the member $zipfh->addMember($zipmember); } - -# -# Does a recursive find of $dir, filling in from the (filled dump) -# directory $dirF. Handles the cases where $dir and $dirF might -# or might not be mangled etc. 
-# -sub MergeFind -{ - my($zipfh, $dir, $dirF) = @_; - - my(@Dir, $fLast); - if ( -d $dir ) { - ZipWriteFile($zipfh, $dir, $Mangle, $Compress); - } elsif ( -d $dirF ) { - ZipWriteFile($zipfh, $dirF, $MangleF, $CompressF); - } - if ( opendir(DIR, $dir) ) { - @Dir = readdir(DIR); - closedir(DIR); - } - if ( defined($NumF) && opendir(DIR, $dirF) ) { - if ( $Mangle == $MangleF ) { - @Dir = (@Dir, readdir(DIR)); - } else { - foreach my $f ( readdir(DIR) ) { - if ( $Mangle ) { - push(@Dir, $bpc->fileNameMangle($f)); - } else { - push(@Dir, $bpc->fileNameUnmangle($f)); - } - } - } - } - foreach my $f ( sort({$a cmp $b} @Dir) ) { - next if ( $f eq "." || $f eq ".." - || $f eq $fLast || ($Mangle && $f eq "attrib") ); - $fLast = $f; - my($fF) = $f; - if ( $Mangle != $MangleF ) { - $fF = $Mangle ? $bpc->fileNameUnmangle($f) - : $bpc->fileNameMangle($f); - } - if ( -e "$dir/$f" ) { - if ( -d "$dir/$f" ) { - MergeFind($zipfh, "$dir/$f", "$dirF/$fF"); - } else { - ZipWriteFile($zipfh, "$dir/$f", $Mangle, $Compress); - } - } elsif ( -e "$dirF/$fF" ) { - if ( -d "$dirF/$fF" ) { - MergeFind($zipfh, "$dir/$f", "$dirF/$fF"); - } else { - ZipWriteFile($zipfh, "$dirF/$fF", $MangleF, $CompressF); - } - } else { - print(STDERR "$0: Botch on $dir, $dirF, $f, $fF\n"); - $ErrorCnt++; - } - } -} diff --git a/cgi-bin/BackupPC_Admin b/cgi-bin/BackupPC_Admin index cb3d26e..8cca256 100755 --- a/cgi-bin/BackupPC_Admin +++ b/cgi-bin/BackupPC_Admin @@ -14,7 +14,8 @@ # user name. # # Also, this script needs to run as the BackupPC user. To accomplish -# this the script is typically installed as setuid to the BackupPC user. +# this the script is typically installed as setuid to the BackupPC user, +# or it can run under mod_perl with httpd running as the BackupPC user. # # AUTHOR # Craig Barratt @@ -38,7 +39,7 @@ # #======================================================================== # -# Version 1.5.0_CVS, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. 
# # See http://backuppc.sourceforge.net. # @@ -50,6 +51,7 @@ use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; use BackupPC::Attrib qw(:all); +use BackupPC::View; use Data::Dumper; use vars qw($Cgi %In $MyURL $User %Conf $TopDir $BinDir $bpc); @@ -418,6 +420,7 @@ sub Action_View || /^\s+directory \\/ || /^Timezone is/ || /^\.\// + || /^ / ) { $skipped++; next; @@ -533,148 +536,91 @@ EOF sub Action_Browse { my $Privileged = CheckPermission($In{host}); - my($i, $dirStr, $fileStr, $mangle); - my($numF, $compressF, $mangleF, $fullDirF); - my $checkBoxCnt = 0; # checkbox counter + my($i, $dirStr, $fileStr, $attr); + my $checkBoxCnt = 0; if ( !$Privileged ) { ErrorExit(eval("qq{$Lang->{Only_privileged_users_can_browse_backup_files}}")); } - my $host = $In{host}; - my $num = $In{num}; - my $dir = $In{dir}; - if ( $host eq "" ) { - ErrorExit($Lang->{Empty_host_name}); - } + my $host = $In{host}; + my $num = $In{num}; + my $share = $In{share}; + my $dir = $In{dir}; + + ErrorExit($Lang->{Empty_host_name}) if ( $host eq "" ); # # Find the requested backup and the previous filled backup # my @Backups = $bpc->BackupInfoRead($host); for ( $i = 0 ; $i < @Backups ; $i++ ) { - if ( !$Backups[$i]{noFill} ) { - $numF = $Backups[$i]{num}; - $mangleF = $Backups[$i]{mangle}; - $compressF = $Backups[$i]{compress}; - } last if ( $Backups[$i]{num} == $num ); } if ( $i >= @Backups ) { ErrorExit("Backup number $num for host ${EscapeHTML($host)} does" . " not exist."); } - if ( !$Backups[$i]{noFill} ) { - # no need to back-fill a filled backup - $numF = $mangleF = $compressF = undef; - } my $backupTime = timeStamp2($Backups[$i]{startTime}); my $backupAge = sprintf("%.1f", (time - $Backups[$i]{startTime}) / (24 * 3600)); - $mangle = $Backups[$i]{mangle}; + my $view = BackupPC::View->new($bpc, $host, \@Backups); + if ( $dir eq "" || $dir eq "." || $dir eq ".." 
) { - if ( !opendir(DIR, "$TopDir/pc/$host/$num") ) { - ErrorExit(eval("qq{$Lang->{Can_t_browse_bad_directory_name}}")); - } - # - # Read this directory and find the first directory - # - foreach my $f ( readdir(DIR) ) { - next if ( $f eq "." || $f eq ".." ); - if ( -d "$TopDir/pc/$host/$num/$f" ) { - $dir = "/$f"; - last; - } - } - closedir(DIR); - if ( $dir eq "" || $dir eq "." || $dir eq ".." ) { - ErrorExit(eval("qq{$Lang->{Directory___EscapeHTML}}")); - } - } - my $relDir = $dir; - my $fullDir = "$TopDir/pc/$host/$num/$relDir"; - if ( defined($numF) ) { - # get full path to filled backup - if ( $mangle && !$mangleF ) { - $fullDirF = "$TopDir/pc/$host/$numF/" - . $bpc->fileNameUnmangle($relDir); + $attr = $view->dirAttrib($num, "", ""); + if ( keys(%$attr) > 0 ) { + $share = (sort(keys(%$attr)))[0]; + $dir = '/'; } else { - $fullDirF = "$TopDir/pc/$host/$numF/$relDir"; + ErrorExit(eval("qq{$Lang->{Directory___EscapeHTML}}")); } } - my $currDir = undef; - # - # Read attributes for the directory and optionally for the filled backup - # - my $attr = BackupPC::Attrib->new({ compress => $Backups[$i]{compress}}); - my $attrF = BackupPC::Attrib->new({ compress => $compressF}) - if ( defined($numF) ); - $attr->read($fullDir) if ( -f $attr->fileName($fullDir) ); - if ( defined($numF) && -f $attrF->fileName($fullDirF) - && $attrF->read($fullDirF) ) { - $attr->merge($attrF); - } + my $relDir = $dir; + my $currDir = undef; + # # Loop up the directory tree until we hit the top. 
# my(@DirStrPrev); while ( 1 ) { - my($fLast, $fum, $fLastum, @DirStr); + my($fLast, $fLastum, @DirStr); - if ( $fullDir =~ m{(^|/)\.\.(/|$)} || !opendir(DIR, $fullDir) ) { + $attr = $view->dirAttrib($num, $share, $relDir); + if ( !defined($attr) ) { ErrorExit(eval("qq{$Lang->{Can_t_browse_bad_directory_name2}}")); } - # - # Read this directory and optionally the corresponding filled directory - # - my @Dir = readdir(DIR); - closedir(DIR); - if ( defined($numF) && opendir(DIR, $fullDirF) ) { - if ( $mangle == $mangleF ) { - @Dir = (@Dir, readdir(DIR)); - } else { - foreach my $f ( readdir(DIR) ) { - next if ( $f eq "." || $f eq ".." ); - push(@Dir, $bpc->fileNameMangle($f)); - } - } - closedir(DIR); - } + my $fileCnt = 0; # file counter $fLast = $dirStr = ""; + # # Loop over each of the files in this directory # - my(@DirUniq); - foreach my $f ( sort({uc($a) cmp uc($b)} @Dir) ) { - next if ( $f eq "." || $f eq ".." - || $f eq $fLast || ($mangle && $f eq "attrib") ); - $fLast = $f; - push(@DirUniq, $f); - } - while ( defined(my $f = shift(@DirUniq)) ) { - my $path = "$relDir/$f"; + foreach my $f ( sort(keys(%$attr)) ) { my($dirOpen, $gotDir, $imgStr, $img); - $fum = $mangle ? 
$bpc->fileNameUnmangle($f) : $f; # unmangled $f - my $fumURI = $fum; # URI escaped $f + my $fURI = $f; # URI escaped $f + my $shareURI = $share; # URI escaped $share + my $path = "$relDir/$f"; + if ( $shareURI eq "" ) { + $shareURI = $path; + $path = ""; + } $path =~ s{^/+}{/}; - $path =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; - $fumURI =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; - $dirOpen = 1 if ( defined($currDir) && $f eq $currDir ); - if ( -d "$fullDir/$f" ) { + $path =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; + $fURI =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; + $shareURI =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; + $dirOpen = 1 if ( defined($currDir) && $f eq $currDir ); + if ( $attr->{$f}{type} == BPC_FTYPE_DIR ) { # # Display directory if it exists in current backup. # First find out if there are subdirs # - my @s = (defined($numF) && -d "$fullDirF/$f") - ? stat("$fullDirF/$f") - : stat("$fullDir/$f"); my($bold, $unbold, $BGcolor); $img |= 1 << 6; - $img |= 1 << 5 if ( $s[3] > 2 ); + $img |= 1 << 5 if ( $attr->{$f}{nlink} > 2 ); if ( $dirOpen ) { $bold = ""; $unbold = ""; $img |= 1 << 2; - $img |= 1 << 3 if ( $s[3] > 2 ); + $img |= 1 << 3 if ( $attr->{$f}{nlink} > 2 ); } my $imgFileName = sprintf("%07b.gif", $img); $imgStr = ""; @@ -683,12 +629,12 @@ sub Action_Browse } else { $BGcolor = ""; } - my $dirName = $fum; + my $dirName = $f; $dirName =~ s/ / /g; push(@DirStr, {needTick => 1, tdArgs => $BGcolor, link => <$imgStr $bold$dirName$unbold +$imgStr $bold$dirName$unbold EOF $fileCnt++; $gotDir = 1; @@ -723,29 +669,32 @@ EOF # This is the selected directory, so display all the files # my $attrStr; - if ( defined($a = $attr->get($fum)) ) { + if ( defined($a = $attr->{$f}) ) { my $mtimeStr = $bpc->timeStamp($a->{mtime}); - my $typeStr = $attr->fileType2Text($a->{type}); + # UGH -> fix this + my $typeStr = BackupPC::Attrib::fileType2Text(undef, + $a->{type}); my $modeStr = sprintf("0%o", $a->{mode} & 07777); $attrStr .= 
<$typeStr - $modeStr + $modeStr + $a->{backupNum} $a->{size} $mtimeStr EOF } else { - $attrStr .= " \n"; + $attrStr .= " \n"; } if ( $gotDir ) { $fileStr .= < ${EscapeHTML($fum)} + ${EscapeHTML($f)} $attrStr EOF } else { $fileStr .= < ${EscapeHTML($fum)} + ${EscapeHTML($f)} $attrStr EOF @@ -754,19 +703,28 @@ EOF } } @DirStrPrev = @DirStr; - last if ( $relDir eq "" ); + last if ( $relDir eq "" && $share eq "" ); # - # Prune the last directory off $relDir + # Prune the last directory off $relDir, or at the very end + # do the top-level directory. # - $relDir =~ s/(.*)\/(.*)/$1/; - $currDir = $2; - $fullDir = "$TopDir/pc/$host/$num/$relDir"; - $fullDirF = "$TopDir/pc/$host/$numF/$relDir" if ( defined($numF) ); + if ( $relDir eq "" ) { + $currDir = $share; + $share = ""; + } else { + $relDir =~ s/(.*)\/(.*)/$1/; + $currDir = $2; + } } - my $dirDisplay = $mangle ? $bpc->fileNameUnmangle($dir) : $dir; - $dirDisplay =~ s{//}{/}g; + $share = $currDir; + my $dirDisplay = "$share/$dir"; + $dirDisplay =~ s{//+}{/}g; + $dirDisplay =~ s{/+$}{}g; my $filledBackup; - if ( defined($numF) ) { + + if ( (my @mergeNums = @{$view->mergeNums}) > 1 ) { + shift(@mergeNums); + my $numF = join(", #", @mergeNums); $filledBackup = eval("qq{$Lang->{This_display_is_merged_with_backup}}"); } Header(eval("qq{$Lang->{Browse_backup__num_for__host}}")); @@ -790,6 +748,21 @@ EOF } else { $fileStr = eval("qq{$Lang->{The_directory_is_empty}}"); } + my @otherDirs; + foreach my $i ( $view->backupList($share, $dir) ) { + next if ( $i == $num ); + my $path = $dir; + my $shareURI = $share; + $path =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; + $shareURI =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg; + push(@otherDirs, <$i +EOF + } + if ( @otherDirs ) { + my $otherDirs = join(", ", @otherDirs); + $filledBackup .= eval("qq{$Lang->{Visit_this_directory_in_backup}}"); + } print (eval("qq{$Lang->{Backup_browse_for__host}}")); Trailer(); @@ -797,21 +770,18 @@ EOF sub Action_Restore { - my($str, $reply, 
$i); + my($str, $reply); my $Privileged = CheckPermission($In{host}); if ( !$Privileged ) { ErrorExit(eval("qq{$Lang->{Only_privileged_users_can_restore_backup_files}}")); } - my $host = $In{host}; - my $num = $In{num}; - my(@fileList, $fileListStr, $hiddenStr, $share, $pathHdr, $badFileCnt); + my $host = $In{host}; + my $num = $In{num}; + my $share = $In{share}; + my(@fileList, $fileListStr, $hiddenStr, $pathHdr, $badFileCnt); my @Backups = $bpc->BackupInfoRead($host); - for ( $i = 0 ; $i < @Backups ; $i++ ) { - last if ( $Backups[$i]{num} == $num ); - } - my $mangle = $Backups[$i]{mangle}; - ServerConnect(); + ServerConnect(); if ( !defined($Hosts->{$host}) ) { ErrorExit(eval("qq{$Lang->{Bad_host_name}}")); } @@ -819,19 +789,14 @@ sub Action_Restore next if ( !defined($In{"fcb$i"}) ); (my $name = $In{"fcb$i"}) =~ s/%([0-9A-F]{2})/chr(hex($1))/eg; $badFileCnt++ if ( $name =~ m{(^|/)\.\.(/|$)} ); - if ( $name =~ m{^/+(.*?)(/.*)} ) { - $share = $1; - $name = $mangle ? $bpc->fileNameUnmangle($2) : $2; - if ( @fileList == 0 ) { - $pathHdr = $name; - } else { - while ( substr($name, 0, length($pathHdr)) ne $pathHdr ) { - $pathHdr = substr($pathHdr, 0, rindex($pathHdr, "/")); - } - } - } + if ( @fileList == 0 ) { + $pathHdr = $name; + } else { + while ( substr($name, 0, length($pathHdr)) ne $pathHdr ) { + $pathHdr = substr($pathHdr, 0, rindex($pathHdr, "/")); + } + } push(@fileList, $name); - $share = $mangle ? $bpc->fileNameUnmangle($share) : $share; $hiddenStr .= < EOF @@ -840,6 +805,7 @@ EOF EOF } $hiddenStr .= "\n"; + $hiddenStr .= "\n"; $badFileCnt++ if ( $In{pathHdr} =~ m{(^|/)\.\.(/|$)} ); $badFileCnt++ if ( $In{num} =~ m{(^|/)\.\.(/|$)} ); if ( @fileList == 0 ) { @@ -880,76 +846,71 @@ EOF } elsif ( $In{type} == 1 ) { # # Provide the selected files via a tar archive. + # + # We no longer use fork/exec (as in v1.5.0) since some mod_perls + # do not correctly preserve the stdout connection to the client + # browser, so we execute BackupPC_tarCreate in-line. 
# - $SIG{CHLD} = 'IGNORE'; - my $pid = fork(); - if ( !defined($pid) ) { - $bpc->ServerMesg(eval("qq{$Lang->{log_Can_t_fork_for_tar_restore_request_by__User}}")); - ErrorExit($Lang->{Can_t_fork_for_tar_restore}); - } - if ( $pid ) { - # - # This is the parent. - # - my @fileListTrim = @fileList; - if ( @fileListTrim > 10 ) { - @fileListTrim = (@fileListTrim[0..9], '...'); - } - $bpc->ServerMesg(eval("qq{$Lang->{log_User__User_downloaded_tar_archive_for__host}}")); - return; - } - # - # This is the child. Print the headers and run BackupPC_tarCreate. - # + my @fileListTrim = @fileList; + if ( @fileListTrim > 10 ) { + @fileListTrim = (@fileListTrim[0..9], '...'); + } + $bpc->ServerMesg(eval("qq{$Lang->{log_User__User_downloaded_tar_archive_for__host}}")); + my @pathOpts; if ( $In{relative} ) { @pathOpts = ("-r", $pathHdr, "-p", ""); } - $bpc->ServerDisconnect(); - print "Content-Type: application/x-gtar\n"; - print "Content-Transfer-Encoding: binary\n"; - print "Content-Disposition: attachment; filename=\"restore.tar\"\n\n"; - exec("$BinDir/BackupPC_tarCreate", + # + # We use syswrite since BackupPC_tarCreate uses syswrite too. + # Need to test this with mod_perl: PaulL says it doesn't work. + # + syswrite(STDOUT, <ServerMesg(eval("qq{$Lang->{log_Can_t_fork_for_zip_restore_request_by__User}}")); - ErrorExit($Lang->{Can_t_fork_for_zip_restore}); - } - if ( $pid ) { - # - # This is the parent. - # - my @fileListTrim = @fileList; - if ( @fileListTrim > 10 ) { - @fileListTrim = (@fileListTrim[0..9], '...'); - } - $bpc->ServerMesg(eval("qq{$Lang->{log_User__User_downloaded_zip_archive_for__host}}")); - return; - } - # - # This is the child. Print the headers and run BackupPC_tarCreate. 
- # + my @fileListTrim = @fileList; + if ( @fileListTrim > 10 ) { + @fileListTrim = (@fileListTrim[0..9], '...'); + } + $bpc->ServerMesg(eval("qq{$Lang->{log_User__User_downloaded_zip_archive_for__host}}")); + my @pathOpts; if ( $In{relative} ) { @pathOpts = ("-r", $pathHdr, "-p", ""); } - $bpc->ServerDisconnect(); - print "Content-Type: application/zip\n"; - print "Content-Transfer-Encoding: binary\n"; - print "Content-Disposition: attachment; filename=\"restore.zip\"\n\n"; + # + # We use syswrite since BackupPC_tarCreate uses syswrite too. + # Need to test this with mod_perl: PaulL says it doesn't work. + # + syswrite(STDOUT, < 'image/x-xwindowdump', 'z' => 'application/x-compress', 'zip' => 'application/zip', + %{$Conf{CgiExt2ContentType}}, # add site-specific values }; - if ( !$Privileged ) { ErrorExit(eval("qq{$Lang->{Only_privileged_users_can_restore_backup_files2}}")); } ServerConnect(); - my @Backups = $bpc->BackupInfoRead($host); - if ( $host eq "" ) { - ErrorExit($Lang->{Empty_host_name}); - } + ErrorExit($Lang->{Empty_host_name}) if ( $host eq "" ); + $dir = "/" if ( $dir eq "" ); - for ( $i = 0 ; $i < @Backups ; $i++ ) { - if ( !$Backups[$i]{noFill} ) { - $numF = $Backups[$i]{num}; - $mangleF = $Backups[$i]{mangle}; - $compressF = $Backups[$i]{compress}; - } - last if ( $Backups[$i]{num} == $num ); - } - $mangle = $Backups[$i]{mangle}; - $compress = $Backups[$i]{compress}; - if ( !$Backups[$i]{noFill} ) { - # no need to back-fill a filled backup - $numF = $mangleF = $compressF = undef; - } - my $fullPath = "$TopDir/pc/$host/$num/$dir"; - $fullPath =~ s{/+}{/}g; - if ( !-f $fullPath && defined($numF) ) { - my $dirF = $dir; - my $fullPathF; - if ( $mangle && !$mangleF ) { - $fullPathF = "$TopDir/pc/$host/$numF/" - . 
$bpc->fileNameUnmangle($dir); - } else { - $fullPathF = "$TopDir/pc/$host/$numF/$dir"; - } - if ( -f $fullPathF ) { - $fullPath = $fullPathF; - $compress = $compressF; - } - } - if ( $fullPath =~ m{(^|/)\.\.(/|$)} || !-f $fullPath ) { - ErrorExit(eval("qq{$Lang->{Can_t_restore_bad_file}}")); + my @Backups = $bpc->BackupInfoRead($host); + my $view = BackupPC::View->new($bpc, $host, \@Backups); + my $a = $view->fileAttrib($num, $share, $dir); + if ( $dir =~ m{(^|/)\.\.(/|$)} || !defined($a) ) { + ErrorExit("Can't restore bad file ${EscapeHTML($dir)}"); } - my $dirUM = $mangle ? $bpc->fileNameUnmangle($dir) : $dir; - my $attr = BackupPC::Attrib->new({compress => $compress}); - my $fullDir = $fullPath; - $fullDir =~ s{(.*)/.*}{$1}; - my $fileName = $1 if ( $dirUM =~ /.*\/(.*)/ ); - $attr->read($fullDir) if ( -f $attr->fileName($fullDir) ); - my $a = $attr->get($fileName); - - my $f = BackupPC::FileZIO->open($fullPath, 0, $compress); + my $f = BackupPC::FileZIO->open($a->{fullPath}, 0, $a->{compress}); my $data; if ( !$skipHardLink && $a->{type} == BPC_FTYPE_HARDLINK ) { # @@ -1198,20 +1123,15 @@ sub restoreFile $f->close; $linkName =~ s/^\.\///; my $share = $1 if ( $dir =~ /^\/?(.*?)\// ); - restoreFile($host, $num, - "$share/" . ($mangle ? $bpc->fileNameMangle($linkName) - : $linkName), 1, $dir); + restoreFile($host, $num, $share, $linkName, 1, $dir); return; } - $dirUM =~ s{//}{/}g; - $fullPath =~ s{//}{/}g; - $bpc->ServerMesg("log User $User recovered file $dirUM ($fullPath)"); + $bpc->ServerMesg("log User $User recovered file $host/$num:$share/$dir ($a->{fullPath})"); $dir = $origName if ( defined($origName) ); - $dirUM = $mangle ? 
$bpc->fileNameUnmangle($dir) : $dir; - my $ext = $1 if ( $dirUM =~ /\.([^\/\.]+)$/ ); + my $ext = $1 if ( $dir =~ /\.([^\/\.]+)$/ ); my $contentType = $Ext2ContentType->{lc($ext)} || "application/octet-stream"; - $fileName = $1 if ( $dirUM =~ /.*\/(.*)/ ); + my $fileName = $1 if ( $dir =~ /.*\/(.*)/ ); $fileName =~ s/"/\\"/g; print "Content-Type: $contentType\n"; print "Content-Transfer-Encoding: binary\n"; @@ -1850,7 +1770,7 @@ sub Header $title $Conf{CgiHeaders} - + EOF @@ -837,6 +833,7 @@ $Lang{fileHeader} = < + @@ -847,8 +844,11 @@ $Lang{Last_bad_XferLOG} = "Last bad XferLOG"; $Lang{Last_bad_XferLOG_errors_only} = "Last bad XferLOG (errors only)"; $Lang{This_display_is_merged_with_backup} = < This display is merged with backup #\$numF, the most recent prior - filled (full) dump. +
  • This display is merged with backup #\$numF. +EOF + +$Lang{Visit_this_directory_in_backup} = < Visit this directory in backup #\$otherDirs. EOF $Lang{Restore_Summary} = < EOF # -------------------------------- @@ -579,6 +579,7 @@ $Lang{Backup_browse_for__host} = < +
    @@ -677,7 +678,7 @@ $Lang{Can_t_browse_bad_directory_name} = "Ne peut pas parcourir " $Lang{Directory___EscapeHTML} = "Le répertoire \${EscapeHTML(\"\$TopDir/pc/\$host/\$num\")}" . " est vide"; $Lang{Can_t_browse_bad_directory_name2} = "Ne peut pas parcourir " - . " \${EscapeHTML(\$fullDir)}:" + . " \${EscapeHTML(\$relDir)}:" . " mauvais nom de répertoire"; $Lang{Only_privileged_users_can_restore_backup_files} = "Seuls les utilisateurs privilégiés peuvent restaurer " . " des fichiers de sauvegarde" @@ -687,8 +688,6 @@ $Lang{You_haven_t_selected_any_files__please_go_Back_to} = "Vous n'avez s . "vous pouvez revenir en arrière pour sélectionner des fichiers."; $Lang{Nice_try__but_you_can_t_put} = "Bien tenté, mais vous ne pouvez pas mettre \'..\' dans" . " n\'importe quel nom de fichier."; -$Lang{Can_t_fork_for_tar_restore} = "Ne peut pas se dupliquer (fork) pour la restauration par tar"; -$Lang{Can_t_fork_for_zip_restore} = "Ne peut pas se dupliquer (fork) pour la restauration par zip"; $Lang{Host__doesn_t_exist} = "L'hôte \${EscapeHTML(\$In{hostDest})} n\'existe pas."; $Lang{You_don_t_have_permission_to_restore_onto_host} = "Vous n\'avez pas la permission de restaurer sur l\'hôte" . " \${EscapeHTML(\$In{hostDest})}"; @@ -735,14 +734,10 @@ $Lang{Backup_requested_on_DHCP__host} = "Demande de sauvegarde sur l\'h $Lang{Backup_requested_on__host_by__User} = "Sauvegarde demandée sur \$host par \$User"; $Lang{Backup_stopped_dequeued_on__host_by__User} = "Sauvegarde Arrêtée/déprogrammée pour \$host par \$User"; -$Lang{log_Can_t_fork_for_tar_restore_request_by__User} = "log Ne peut pas se dupliquer (fork)" - . " pour la restauration tar demandée par \$User"; $Lang{log_User__User_downloaded_tar_archive_for__host} = "log L\'utilisateur \$User a téléchargé " . "l\'archive tar pour \$host," . " sauvegarde \$num; Les fichiers étaient: " . " \${join(\", \", \@fileListTrim)}"; -$Lang{log_Can_t_fork_for_zip_restore_request_by__User} = "log Ne peut pas se dupliquer (fork)" - . 
"pour la restauration zip demandée par \$User"; $Lang{log_User__User_downloaded_zip_archive_for__host}= "log L\'utilisateur \$User a téléchargé " . "l\'archive zip pour \$host," . " Sauvegarde \$num; Les fichiers étaient: " @@ -836,7 +831,7 @@ $Lang{Host_Inhost} = "H $Lang{checkAll} = <
  • EOF @@ -845,6 +840,7 @@ $Lang{fileHeader} = < + @@ -858,6 +854,12 @@ $Lang{This_display_is_merged_with_backup} = < Cet affichage est fusionné avec la sauvegarde n°\$numF, la plus récente copie intégrale. EOF +# Needs translation! +$Lang{Visit_this_directory_in_backup} = < Visit this directory in backup #\$otherDirs. +EOF + + $Lang{Restore_Summary} = < diff --git a/lib/BackupPC/Lib.pm b/lib/BackupPC/Lib.pm index 774210b..5946f1b 100644 --- a/lib/BackupPC/Lib.pm +++ b/lib/BackupPC/Lib.pm @@ -29,7 +29,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # @@ -52,24 +52,27 @@ use Digest::MD5; sub new { my $class = shift; - my($topDir) = @_; + my($topDir, $installDir) = @_; + my $self = bless { TopDir => $topDir || '/data/BackupPC', - BinDir => '/usr/local/BackupPC/bin', - LibDir => '/usr/local/BackupPC/lib', - Version => '1.5.0', + BinDir => $installDir || '/usr/local/BackupPC', + LibDir => $installDir || '/usr/local/BackupPC', + Version => '1.6.0_CVS', BackupFields => [qw( num type startTime endTime nFiles size nFilesExist sizeExist nFilesNew sizeNew xferErrs xferBadFile xferBadShare tarErrs compress sizeExistComp sizeNewComp - noFill fillFromNum mangle + noFill fillFromNum mangle xferMethod level )], RestoreFields => [qw( num startTime endTime result errorMsg nFiles size tarCreateErrs xferErrs )], }, $class; + $self->{BinDir} .= "/bin"; + $self->{LibDir} .= "/lib"; # # Clean up %ENV and setup other variables. 
# @@ -391,6 +394,7 @@ sub RmTreeDefer my($d) = $1; my($f) = $2; my($cwd) = Cwd::fastcwd(); + $cwd = $1 if ( $cwd =~ /(.*)/ ); $self->RmTreeQuiet($d, $f); chdir($cwd) if ( $cwd ); } @@ -405,6 +409,7 @@ sub RmTreeTrashEmpty my(@files); my($cwd) = Cwd::fastcwd(); + $cwd = $1 if ( $cwd =~ /(.*)/ ); return if ( !-d $trashDir ); my $d = DirHandle->new($trashDir) or carp "Can't read $trashDir: $!"; @@ -730,6 +735,7 @@ sub fileNameEltMangle { my($self, $name) = @_; + return "" if ( $name eq "" ); $name =~ s{([%/\n\r])}{sprintf("%%%02x", ord($1))}eg; return "f$name"; } diff --git a/lib/BackupPC/PoolWrite.pm b/lib/BackupPC/PoolWrite.pm index 1522051..123bf8d 100644 --- a/lib/BackupPC/PoolWrite.pm +++ b/lib/BackupPC/PoolWrite.pm @@ -56,7 +56,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # diff --git a/lib/BackupPC/View.pm b/lib/BackupPC/View.pm new file mode 100644 index 0000000..4cb6da2 --- /dev/null +++ b/lib/BackupPC/View.pm @@ -0,0 +1,337 @@ +#============================================================= -*-perl-*- +# +# BackupPC::View package +# +# DESCRIPTION +# +# This library defines a BackupPC::View class for merging of +# incremental backups and file attributes. This provides the +# caller with a single view of a merged backup, without worrying +# about which backup contributes which files. +# +# AUTHOR +# Craig Barratt +# +# COPYRIGHT +# Copyright (C) 2002 Craig Barratt +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. 
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.6.0_CVS, released 10 Dec 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+package BackupPC::View;
+
+use strict;
+
+use File::Path;
+use BackupPC::Lib;
+use BackupPC::Attrib qw(:all);
+use BackupPC::FileZIO;
+
+#
+# Create a new view object for the given host.  $backups is the list
+# of backup info hashes for that host; any backup without an explicit
+# level gets level 0 (full) or 1 (incremental) based on its type.
+#
+sub new
+{
+    my($class, $bpc, $host, $backups) = @_;
+    my $m = bless {
+        bpc     => $bpc,        # BackupPC::Lib object
+        host    => $host,       # host name
+        backups => $backups,    # all backups for this host
+        num     => -1,          # backup number
+        idx     => -1,          # index into backups for backup
+                                # we are viewing
+        dirPath => undef,       # path to current directory
+        dirAttr => undef,       # attributes of current directory
+    }, $class;
+    for ( my $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
+        next if ( defined($m->{backups}[$i]{level}) );
+        $m->{backups}[$i]{level} = $m->{backups}[$i]{type} eq "full" ? 0 : 1;
+    }
+    $m->{topDir} = $m->{bpc}->TopDir();
+    return $m;
+}
+
+#
+# Fill $m->{files} with the merged contents of $dir for the given
+# backup number and share, working backwards through lower-level
+# backups until a level-0 (full) backup is reached.
+#
+sub dirCache
+{
+    my($m, $backupNum, $share, $dir) = @_;
+    my($i, $level);
+
+    $dir = "/$dir" if ( $dir !~ m{^/} );
+    $dir =~ s{/+$}{};
+    return if ( $m->{num} == $backupNum
+             && $m->{share} eq $share
+             && $m->{dir} eq $dir );
+    if ( $m->{num} != $backupNum ) {
+        for ( $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
+            last if ( $m->{backups}[$i]{num} == $backupNum );
+        }
+        if ( $i >= @{$m->{backups}} ) {
+            $m->{idx} = -1;
+            return;
+        }
+        $m->{num} = $backupNum;
+        $m->{idx} = $i;
+    }
+    $m->{files} = {};
+    $level = $m->{backups}[$m->{idx}]{level} + 1;
+
+    #
+    # Remember the requested share and dir
+    #
+    $m->{share} = $share;
+    $m->{dir}   = $dir;
+
+    #
+    # merge backups, starting at the requested one, and working
+    # backwards until we get to level 0.
+    #
+    $m->{mergeNums} = [];
+    for ( $i = $m->{idx} ; $level > 0 && $i >= 0 ; $i-- ) {
+        #print("Do $i ($m->{backups}[$i]{noFill},$m->{backups}[$i]{level})\n");
+        #
+        # skip backups with the same or higher level
+        #
+        next if ( $m->{backups}[$i]{level} >= $level );
+
+        $level     = $m->{backups}[$i]{level};
+        $backupNum = $m->{backups}[$i]{num};
+        push(@{$m->{mergeNums}}, $backupNum);
+        my $mangle   = $m->{backups}[$i]{mangle};
+        my $compress = $m->{backups}[$i]{compress};
+        my $path     = "$m->{topDir}/pc/$m->{host}/$backupNum/";
+        my $sharePathM;
+        if ( $mangle ) {
+            $sharePathM = $m->{bpc}->fileNameEltMangle($share)
+                        . $m->{bpc}->fileNameMangle($dir);
+        } else {
+            $sharePathM = $share . $dir;
+        }
+        $path .= $sharePathM;
+        #print("Opening $path\n");
+        if ( !opendir(DIR, $path) ) {
+            if ( $i == $m->{idx} ) {
+                #
+                # Oops, directory doesn't exist.
+                #
+                $m->{files} = undef;
+                return;
+            }
+            next;
+        }
+        my @dir = readdir(DIR);
+        closedir(DIR);
+        my $attr;
+        if ( $mangle ) {
+            $attr = BackupPC::Attrib->new({ compress => $compress });
+            if ( -f $attr->fileName($path) && !$attr->read($path) ) {
+                $m->{error} = "Can't read attribute file in $path";
+                $attr = undef;
+            }
+        }
+        foreach my $file ( @dir ) {
+            $file = $1 if ( $file =~ /(.*)/ );
+            my $fileUM = $file;
+            $fileUM = $m->{bpc}->fileNameUnmangle($fileUM) if ( $mangle );
+            #
+            # skip special files
+            #
+            next if ( defined($m->{files}{$fileUM})
+                    || $file eq ".."
+                    || $file eq "."
+                    || $mangle && $file eq "attrib" );
+            #
+            # skip directories in earlier backups (each backup always
+            # has the complete directory tree).
+            #
+            my @s = stat("$path/$file");
+            next if ( $i < $m->{idx} && -d _ );
+            if ( defined($attr) && defined(my $a = $attr->get($fileUM)) ) {
+                $m->{files}{$fileUM} = $a;
+                $attr->set($fileUM, undef);
+            } else {
+                #
+                # Very expensive in the non-attribute case when compression
+                # is on.  We have to stat the file and read compressed files
+                # to determine their size.
+                #
+                $m->{files}{$fileUM} = {
+                    type  => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE,
+                    mode  => $s[2],
+                    uid   => $s[4],
+                    gid   => $s[5],
+                    size  => -f _ ? $s[7] : 0,
+                    mtime => $s[9],
+                };
+                if ( $compress && -f _ ) {
+                    #
+                    # Compute the correct size by reading the whole file
+                    #
+                    my $f = BackupPC::FileZIO->open("$path/$file",
+                                                    0, $compress);
+                    if ( !defined($f) ) {
+                        $m->{error} = "Can't open $path/$file";
+                    } else {
+                        my($data, $size);
+                        while ( $f->read(\$data, 65636 * 8) > 0 ) {
+                            $size += length($data);
+                        }
+                        $f->close;
+                        $m->{files}{$fileUM}{size} = $size;
+                    }
+                }
+            }
+            $m->{files}{$fileUM}{relPath}    = "$dir/$fileUM";
+            $m->{files}{$fileUM}{sharePathM} = "$sharePathM/$file";
+            $m->{files}{$fileUM}{fullPath}   = "$path/$file";
+            $m->{files}{$fileUM}{backupNum}  = $backupNum;
+            $m->{files}{$fileUM}{compress}   = $compress;
+            $m->{files}{$fileUM}{nlink}      = $s[3];
+            $m->{files}{$fileUM}{inode}      = $s[1];
+        }
+        #
+        # Also include deleted files
+        #
+        if ( defined($attr) ) {
+            my $a = $attr->get;
+            foreach my $fileUM ( keys(%$a) ) {
+                next if ( $a->{$fileUM}{type} != BPC_FTYPE_DELETED );
+                my $file = $fileUM;
+                $file = $m->{bpc}->fileNameMangle($fileUM) if ( $mangle );
+                $m->{files}{$fileUM}             = $a->{$fileUM};
+                $m->{files}{$fileUM}{relPath}    = "$dir/$fileUM";
+                $m->{files}{$fileUM}{sharePathM} = "$sharePathM/$file";
+                $m->{files}{$fileUM}{fullPath}   = "$path/$file";
+                $m->{files}{$fileUM}{backupNum}  = $backupNum;
+                $m->{files}{$fileUM}{compress}   = $compress;
+                $m->{files}{$fileUM}{nlink}      = 0;
+                $m->{files}{$fileUM}{inode}      = 0;
+            }
+        }
+    }
+    #
+    # Prune deleted files
+    #
+    foreach my $file ( keys(%{$m->{files}}) ) {
+        next if ( $m->{files}{$file}{type} != BPC_FTYPE_DELETED );
+        delete($m->{files}{$file});
+    }
+}
+
+#
+# Return the attributes of a specific file
+#
+sub fileAttrib
+{
+    my($m, $backupNum, $share, $path) = @_;
+    my $dir = $path;
+    $dir =~ s{(.*)/(.*)}{$1};
+    my $file = $2;
+
+    $m->dirCache($backupNum, $share, $dir);
+    return $m->{files}{$file};
+}
+
+#
+# Return the contents of a directory
+#
+sub dirAttrib
+{
+    my($m, $backupNum, $share, $dir) = @_;
+
+    $m->dirCache($backupNum, $share, $dir);
+    return $m->{files};
+}
+
+#
+# Return the list of backup numbers merged by the last dirCache().
+#
+sub mergeNums
+{
+    my($m) = @_;
+
+    return $m->{mergeNums};
+}
+
+#
+# Return the numbers of all backups in which $dir exists for $share.
+#
+sub backupList
+{
+    my($m, $share, $dir) = @_;
+    my($i, @backupList);
+
+    $dir = "/$dir" if ( $dir !~ m{^/} );
+    $dir =~ s{/+$}{};
+
+    for ( $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
+        my $backupNum = $m->{backups}[$i]{num};
+        my $mangle    = $m->{backups}[$i]{mangle};
+        my $path      = "$m->{topDir}/pc/$m->{host}/$backupNum/";
+        my $sharePathM;
+        if ( $mangle ) {
+            $sharePathM = $m->{bpc}->fileNameEltMangle($share)
+                        . $m->{bpc}->fileNameMangle($dir);
+        } else {
+            $sharePathM = $share . $dir;
+        }
+        $path .= $sharePathM;
+        next if ( !-d $path );
+        push(@backupList, $backupNum);
+    }
+    return @backupList;
+}
+
+#
+# Do a recursive find starting at the given path (either a file
+# or directory).  The callback function $callback is called on each
+# file and directory.  The function arguments are the attrs hashref,
+# and additional callback arguments.  The search is depth-first if
+# depth is set.  Returns -1 if $path does not exist.
+#
+sub find
+{
+    my($m, $backupNum, $share, $path, $depth, $callback, @callbackArgs) = @_;
+
+    my $attr = $m->dirAttrib($backupNum, $share, $path);
+    if ( !defined($attr) ) {
+        #
+        # maybe this is a file, not a directory; if so call the callback
+        # just on this file.
+        #
+        my $attr = $m->fileAttrib($backupNum, $share, $path);
+        return -1 if ( !defined($attr) );
+        &$callback($attr, @callbackArgs);
+        return;
+    }
+    foreach my $file ( keys(%$attr) ) {
+        &$callback($attr->{$file}, @callbackArgs);
+        next if ( !$depth || $attr->{$file}{type} != BPC_FTYPE_DIR );
+        #
+        # For depth-first, recurse as we hit each directory
+        #
+        $m->find($backupNum, $share, "$path/$file", $depth,
+                 $callback, @callbackArgs);
+    }
+    if ( !$depth ) {
+        #
+        # For non-depth, recurse directories after we finish current dir
+        #
+        foreach my $file ( keys(%{$attr}) ) {
+            next if ( $attr->{$file}{type} != BPC_FTYPE_DIR );
+            $m->find($backupNum, $share, "$path/$file", $depth,
+                     $callback, @callbackArgs);
+        }
+    }
+}
+
+1;
diff --git a/lib/BackupPC/Xfer/Rsync.pm b/lib/BackupPC/Xfer/Rsync.pm
new file mode 100644
index 0000000..a6d61a1
--- /dev/null
+++ b/lib/BackupPC/Xfer/Rsync.pm
@@ -0,0 +1,353 @@
+#============================================================= -*-perl-*-
+#
+# BackupPC::Xfer::Rsync package
+#
+# DESCRIPTION
+#
+#   This library defines a BackupPC::Xfer::Rsync class for managing
+#   the rsync-based transport of backup data from the client.
+#
+# AUTHOR
+#   Craig Barratt
+#
+# COPYRIGHT
+#   Copyright (C) 2002  Craig Barratt
+#
+#   This program is free software; you can redistribute it and/or modify
+#   it under the terms of the GNU General Public License as published by
+#   the Free Software Foundation; either version 2 of the License, or
+#   (at your option) any later version.
+#
+#   This program is distributed in the hope that it will be useful,
+#   but WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#   GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.6.0_CVS, released 10 Dec 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+package BackupPC::Xfer::Rsync;
+
+use strict;
+use BackupPC::View;
+use BackupPC::Xfer::RsyncFileIO;
+
+use vars qw( $RsyncLibOK );
+
+BEGIN {
+    eval "use File::RsyncP;";
+    if ( $@ ) {
+        #
+        # Rsync module doesn't exist.
+        #
+        $RsyncLibOK = 0;
+    } else {
+        $RsyncLibOK = 1;
+    }
+};
+
+#
+# Create a new rsync transfer object.  Returns undef if File::RsyncP
+# is not installed.  $args (optional hashref) seeds the object fields.
+#
+sub new
+{
+    my($class, $bpc, $args) = @_;
+
+    return if ( !$RsyncLibOK );
+    $args ||= {};
+    my $t = bless {
+        bpc       => $bpc,
+        conf      => { $bpc->Conf },
+        host      => "",
+        hostIP    => "",
+        shareName => "",
+        badFiles  => [],
+        %$args,
+    }, $class;
+
+    return $t;
+}
+
+#
+# Merge additional arguments into the transfer object.
+#
+sub args
+{
+    my($t, $args) = @_;
+
+    foreach my $arg ( keys(%$args) ) {
+        $t->{$arg} = $args->{$arg};
+    }
+}
+
+#
+# Rsync transfers don't go via tar; the dispatcher checks this.
+#
+sub useTar
+{
+    return 0;
+}
+
+#
+# Prepare the transfer: build the client command line (substituting
+# $fileList, $argList, $host etc), set up the File::RsyncP object with
+# our custom FileIO module, and return a one-line log message.
+#
+sub start
+{
+    my($t) = @_;
+    my $bpc  = $t->{bpc};
+    my $conf = $t->{conf};
+    my(@fileList, @rsyncClientCmd, $logMsg, $incrDate);
+
+    if ( $t->{type} eq "restore" ) {
+        # TODO
+        #push(@rsyncClientCmd, split(/ +/, $conf->{RsyncClientRestoreCmd}));
+        $logMsg = "restore not supported for $t->{shareName}";
+        #
+        # restores are considered to work unless we see they fail
+        # (opposite to backups...)
+        #
+        $t->{xferOK} = 1;
+    } else {
+        #
+        # Turn $conf->{BackupFilesOnly} and $conf->{BackupFilesExclude}
+        # into a hash of arrays of files.  NOT IMPLEMENTED YET.
+        #
+        $conf->{RsyncShareName} = [ $conf->{RsyncShareName} ]
+                        unless ref($conf->{RsyncShareName}) eq "ARRAY";
+        #
+        # NOTE: was "foreach my $param qw(...)"; bare qw() as implicit
+        # foreach parentheses is a syntax error as of perl 5.18, so use
+        # an explicit parenthesized list.
+        #
+        foreach my $param ( qw(BackupFilesOnly BackupFilesExclude) ) {
+            next if ( !defined($conf->{$param}) );
+            if ( ref($conf->{$param}) eq "ARRAY" ) {
+                $conf->{$param} = {
+                    $conf->{RsyncShareName}[0] => $conf->{$param}
+                };
+            } elsif ( ref($conf->{$param}) eq "HASH" ) {
+                # do nothing
+            } else {
+                $conf->{$param} = {
+                    $conf->{RsyncShareName}[0] => [ $conf->{$param} ]
+                };
+            }
+        }
+        if ( defined($conf->{BackupFilesExclude}{$t->{shareName}}) ) {
+            foreach my $file ( @{$conf->{BackupFilesExclude}{$t->{shareName}}} )
+            {
+                push(@fileList, "--exclude=$file");
+            }
+        }
+        if ( defined($conf->{BackupFilesOnly}{$t->{shareName}}) ) {
+            foreach my $file ( @{$conf->{BackupFilesOnly}{$t->{shareName}}} ) {
+                push(@fileList, $file);
+            }
+        } else {
+            push(@fileList, ".");
+        }
+        push(@rsyncClientCmd, split(/ +/, $conf->{RsyncClientCmd}));
+        if ( $t->{type} eq "full" ) {
+            $logMsg = "full backup started for directory $t->{shareName}";
+        } else {
+            $incrDate = $bpc->timeStampISO($t->{lastFull} - 3600, 1);
+            $logMsg = "incr backup started back to $incrDate for directory"
+                    . " $t->{shareName}";
+        }
+        $t->{xferOK} = 0;
+    }
+    #
+    # Merge variables into @rsyncClientCmd
+    #
+    my $vars = {
+        host      => $t->{host},
+        hostIP    => $t->{hostIP},
+        shareName => $t->{shareName},
+        rsyncPath => $conf->{RsyncClientPath},
+        sshPath   => $conf->{SshPath},
+    };
+    my @cmd = @rsyncClientCmd;
+    @rsyncClientCmd = ();
+    foreach my $arg ( @cmd ) {
+        next if ( $arg =~ /^\s*$/ );
+        if ( $arg =~ /^\$fileList(\+?)/ ) {
+            my $esc = $1 eq "+";
+            foreach $arg ( @fileList ) {
+                $arg = $bpc->shellEscape($arg) if ( $esc );
+                push(@rsyncClientCmd, $arg);
+            }
+        } elsif ( $arg =~ /^\$argList(\+?)/ ) {
+            my $esc = $1 eq "+";
+            foreach $arg ( (@{$conf->{RsyncArgs}},
+                            @{$conf->{RsyncClientArgs}}) ) {
+                $arg = $bpc->shellEscape($arg) if ( $esc );
+                push(@rsyncClientCmd, $arg);
+            }
+        } else {
+            $arg =~ s{\$(\w+)(\+?)}{
+                defined($vars->{$1})
+                    ? ($2 eq "+" ? $bpc->shellEscape($vars->{$1}) : $vars->{$1})
+                    : "\$$1"
+            }eg;
+            push(@rsyncClientCmd, $arg);
+        }
+    }
+
+    #
+    # A full dump is implemented with --ignore-times: this causes all
+    # files to be checksummed, even if the attributes are the same.
+    # That way all the file contents are checked, but you get all
+    # the efficiencies of rsync: only files deltas need to be
+    # transferred, even though it is a full dump.
+    #
+    my $rsyncArgs = $conf->{RsyncArgs};
+    $rsyncArgs = [@$rsyncArgs, "--ignore-times"] if ( $t->{type} eq "full" );
+
+    #
+    # Create the Rsync object, and tell it to use our own File::RsyncP::FileIO
+    # module, which handles all the special BackupPC file storage
+    # (compression, mangling, hardlinks, special files, attributes etc).
+    #
+    $t->{rs} = File::RsyncP->new({
+        logLevel   => $conf->{RsyncLogLevel},
+        rsyncCmd   => \@rsyncClientCmd,
+        rsyncArgs  => $rsyncArgs,
+        logHandler => sub {
+                          my($str) = @_;
+                          $str .= "\n";
+                          $t->{XferLOG}->write(\$str);
+                      },
+        fio        => BackupPC::Xfer::RsyncFileIO->new({
+                          xfer     => $t,
+                          bpc      => $t->{bpc},
+                          conf     => $t->{conf},
+                          host     => $t->{host},
+                          backups  => $t->{backups},
+                          logLevel => $conf->{RsyncLogLevel},
+                      }),
+    });
+
+    # TODO: alarm($conf->{SmbClientTimeout});
+    delete($t->{_errStr});
+
+    return $logMsg;
+}
+
+#
+# Run the transfer: either spawn the remote rsync client or connect
+# to an rsyncd server, then do the transfer and collect stats.
+# Returns a stats list on completion.
+#
+sub run
+{
+    my($t) = @_;
+    my $rs   = $t->{rs};
+    my $conf = $t->{conf};
+
+    if ( $t->{XferMethod} eq "rsync" ) {
+        #
+        # Run rsync command
+        #
+        $rs->remoteStart(1, $t->{shareName});
+    } else {
+        #
+        # Connect to the rsync server
+        #
+        if ( defined(my $err = $rs->serverConnect($t->{hostIP},
+                                             $conf->{RsyncdClientPort})) ) {
+            $t->{hostError} = $err;
+            return;
+        }
+        if ( defined(my $err = $rs->serverService($t->{shareName},
+                                                  "craig", "xyz123", 0)) ) {
+            $t->{hostError} = $err;
+            return;
+        }
+        $rs->serverStart(1, ".");
+    }
+    my $error = $rs->go($t->{shareName});
+    $rs->serverClose();
+
+    #
+    # TODO: generate sensible stats
+    #
+    # $rs->{stats}{totalWritten}
+    # $rs->{stats}{totalSize}
+    #
+    # qw(byteCnt fileCnt xferErrCnt xferBadShareCnt xferBadFileCnt
+    #    xferOK hostAbort hostError lastOutputLine)
+    #
+    my $stats = $rs->statsFinal;
+    if ( !defined($error) && defined($stats) ) {
+        $t->{xferOK} = 1;
+    } else {
+        $t->{xferOK} = 0;
+    }
+    $t->{byteCnt} = $stats->{childStats}{TotalFileSize}
+                  + $stats->{parentStats}{TotalFileSize};
+    $t->{fileCnt} = $stats->{childStats}{TotalFileCnt}
+                  + $stats->{parentStats}{TotalFileCnt};
+    #
+    # TODO: get error count, and call fio to get stats...
+    #
+    $t->{hostError} = $error if ( defined($error) );
+
+    return (
+        0,
+        $stats->{childStats}{ExistFileCnt}
+            + $stats->{parentStats}{ExistFileCnt},
+        $stats->{childStats}{ExistFileSize}
+            + $stats->{parentStats}{ExistFileSize},
+        $stats->{childStats}{ExistFileCompSize}
+            + $stats->{parentStats}{ExistFileCompSize},
+        $stats->{childStats}{TotalFileCnt}
+            + $stats->{parentStats}{TotalFileCnt},
+        $stats->{childStats}{TotalFileSize}
+            + $stats->{parentStats}{TotalFileSize},
+    );
+}
+
+# alarm($conf->{SmbClientTimeout});
+
+#
+# No extra file descriptors to watch for this transfer method.
+#
+sub setSelectMask
+{
+    my($t, $FDreadRef) = @_;
+}
+
+sub errStr
+{
+    my($t) = @_;
+
+    return $t->{_errStr};
+}
+
+#
+# The rsync transfer runs in-process, so there is no separate pid.
+#
+sub xferPid
+{
+    my($t) = @_;
+
+    return -1;
+}
+
+#
+# Queue a log message for the dispatcher.
+#
+sub logMsg
+{
+    my($t, $msg) = @_;
+
+    push(@{$t->{_logMsg}}, $msg);
+}
+
+#
+# Dequeue the next pending log message (undef when empty).
+#
+sub logMsgGet
+{
+    my($t) = @_;
+
+    return shift(@{$t->{_logMsg}});
+}
+
+#
+# Returns a hash ref giving various status information about
+# the transfer.
+#
+sub getStats
+{
+    my($t) = @_;
+
+    return { map { $_ => $t->{$_} }
+            qw(byteCnt fileCnt xferErrCnt xferBadShareCnt xferBadFileCnt
+               xferOK hostAbort hostError lastOutputLine)
+    };
+}
+
+sub getBadFiles
+{
+    my($t) = @_;
+
+    return @{$t->{badFiles}};
+}
+
+1;
diff --git a/lib/BackupPC/Xfer/RsyncFileIO.pm b/lib/BackupPC/Xfer/RsyncFileIO.pm
new file mode 100644
index 0000000..d7c19fb
--- /dev/null
+++ b/lib/BackupPC/Xfer/RsyncFileIO.pm
@@ -0,0 +1,915 @@
+#============================================================= -*-perl-*-
+#
+# Rsync package
+#
+# DESCRIPTION
+#
+# AUTHOR
+#   Craig Barratt
+#
+# COPYRIGHT
+#   Copyright (C) 2002  Craig Barratt
+#
+#========================================================================
+#
+# Version 1.6.0_CVS, released 10 Dec 2002.
+#
+# See http://backuppc.sourceforge.net.
+# +#======================================================================== + +package BackupPC::Xfer::RsyncFileIO; + +use strict; +use File::Path; +use BackupPC::Attrib qw(:all); +use BackupPC::FileZIO; +use BackupPC::PoolWrite; +use Data::Dumper; + +use constant S_IFMT => 0170000; # type of file +use constant S_IFDIR => 0040000; # directory +use constant S_IFCHR => 0020000; # character special +use constant S_IFBLK => 0060000; # block special +use constant S_IFREG => 0100000; # regular +use constant S_IFLNK => 0120000; # symbolic link +use constant S_IFSOCK => 0140000; # socket +use constant S_IFIFO => 0010000; # fifo + +use vars qw( $RsyncLibOK ); + +BEGIN { + eval "use File::RsyncP::Digest"; + if ( $@ ) { + # + # Rsync module doesn't exist. + # + $RsyncLibOK = 0; + } else { + $RsyncLibOK = 1; + } +}; + +sub new +{ + my($class, $options) = @_; + + return if ( !$RsyncLibOK ); + $options ||= {}; + my $fio = bless { + blockSize => 700, + logLevel => 0, + digest => File::RsyncP::Digest->new, + checksumSeed => 0, + attrib => {}, + %$options, + }, $class; + + $fio->{shareM} = $fio->{bpc}->fileNameEltMangle($fio->{xfer}{shareName}); + $fio->{outDir} = "$fio->{xfer}{outDir}/new/"; + $fio->{outDirSh} = "$fio->{outDir}/$fio->{shareM}/"; + $fio->{view} = BackupPC::View->new($fio->{bpc}, $fio->{host}, + $fio->{backups}); + $fio->{full} = $fio->{xfer}{type} eq "full" ? 1 : 0; + $fio->{newFilesFH} = $fio->{xfer}{newFilesFH}; + $fio->{lastBkupNum} = $fio->{xfer}{lastBkupNum}; + return $fio; +} + +sub blockSize +{ + my($fio, $value) = @_; + + $fio->{blockSize} = $value if ( defined($value) ); + return $fio->{blockSize}; +} + +# +# Setup rsync checksum computation for the given file. 
+# +sub csumStart +{ + my($fio, $f) = @_; + my $attr = $fio->attribGet($f); + + $fio->{file} = $f; + $fio->csumEnd if ( defined($fio->{fh}) ); + return if ( $attr->{type} != BPC_FTYPE_FILE ); + if ( !defined($fio->{fh} = BackupPC::FileZIO->open($attr->{fullPath}, + 0, + $attr->{compress})) ) { + $fio->log("Can't open $attr->{fullPath}"); + return -1; + } +} + +sub csumGet +{ + my($fio, $num, $csumLen, $blockSize) = @_; + my($fileData); + + $num ||= 100; + $csumLen ||= 16; + + return if ( !defined($fio->{fh}) ); + if ( $fio->{fh}->read(\$fileData, $blockSize * $num) <= 0 ) { + return $fio->csumEnd; + } + #$fileData = substr($fileData, 0, $blockSize * $num - 2); + $fio->log(sprintf("%s: getting csum ($num,$csumLen,%d,0x%x)\n", + $fio->{file}{name}, + length($fileData), + $fio->{checksumSeed})) + if ( $fio->{logLevel} >= 10 ); + return $fio->{digest}->rsyncChecksum($fileData, $blockSize, + $csumLen, $fio->{checksumSeed}); +} + +sub csumEnd +{ + my($fio) = @_; + + return if ( !defined($fio->{fh}) ); + $fio->{fh}->close(); + delete($fio->{fh}); +} + +sub readStart +{ + my($fio, $f) = @_; + my $attr = $fio->attribGet($f); + + $fio->{file} = $f; + $fio->readEnd if ( defined($fio->{fh}) ); + if ( !defined(my $fh = BackupPC::FileZIO->open($attr->{fullPath}, + 0, + $attr->{compress})) ) { + $fio->log("Can't open $attr->{fullPath}"); + return; + } +} + +sub read +{ + my($fio, $num) = @_; + my($fileData); + + $num ||= 32768; + return if ( !defined($fio->{fh}) ); + if ( $fio->{fh}->read(\$fileData, $num) <= 0 ) { + return $fio->readEnd; + } + return \$fileData; +} + +sub readEnd +{ + my($fio) = @_; + + return if ( !defined($fio->{fh}) ); + $fio->{fh}->close; + delete($fio->{fh}); +} + +sub checksumSeed +{ + my($fio, $checksumSeed) = @_; + + $fio->{checksumSeed} = $checksumSeed; +} + +sub dirs +{ + my($fio, $localDir, $remoteDir) = @_; + + $fio->{localDir} = $localDir; + $fio->{remoteDir} = $remoteDir; +} + +sub viewCacheDir +{ + my($fio, $share, $dir) = @_; + my $shareM; + + 
#$fio->log("viewCacheDir($share, $dir)"); + if ( !defined($share) ) { + $share = $fio->{xfer}{shareName}; + $shareM = $fio->{shareM}; + } else { + $shareM = $fio->{bpc}->fileNameEltMangle($share); + } + $shareM = "$shareM/$dir" if ( $dir ne "" ); + return if ( defined($fio->{viewCache}{$shareM}) ); + # + # purge old cache entries (ie: those that don't match the + # first part of $dir). + # + foreach my $d ( keys(%{$fio->{viewCache}}) ) { + delete($fio->{viewCache}{$d}) if ( $shareM !~ m{^\Q$d/} ); + } + # + # fetch new directory attributes + # + $fio->{viewCache}{$shareM} + = $fio->{view}->dirAttrib($fio->{lastBkupNum}, $share, $dir); +} + +sub attribGet +{ + my($fio, $f) = @_; + my($dir, $fname, $share, $shareM); + + if ( $f->{name} =~ m{(.*)/(.*)} ) { + $shareM = $fio->{shareM}; + $dir = $1; + $fname = $2; + } elsif ( $f->{name} ne "." ) { + $shareM = $fio->{shareM}; + $dir = ""; + $fname = $f->{name}; + } else { + $share = ""; + $shareM = ""; + $dir = ""; + $fname = $fio->{xfer}{shareName}; + } + $fio->viewCacheDir($share, $dir); + $shareM .= "/$dir" if ( $dir ne "" ); + return $fio->{viewCache}{$shareM}{$fname}; +} + +sub mode2type +{ + my($fio, $mode) = @_; + + if ( ($mode & S_IFMT) == S_IFREG ) { + return BPC_FTYPE_FILE; + } elsif ( ($mode & S_IFMT) == S_IFDIR ) { + return BPC_FTYPE_DIR; + } elsif ( ($mode & S_IFMT) == S_IFLNK ) { + return BPC_FTYPE_SYMLINK; + } elsif ( ($mode & S_IFMT) == S_IFCHR ) { + return BPC_FTYPE_CHARDEV; + } elsif ( ($mode & S_IFMT) == S_IFBLK ) { + return BPC_FTYPE_BLOCKDEV; + } elsif ( ($mode & S_IFMT) == S_IFIFO ) { + return BPC_FTYPE_FIFO; + } elsif ( ($mode & S_IFMT) == S_IFSOCK ) { + return BPC_FTYPE_SOCKET; + } else { + return BPC_FTYPE_UNKNOWN; + } +} + +# +# Set the attributes for a file. Returns non-zero on error. +# +sub attribSet +{ + my($fio, $f, $placeHolder) = @_; + my($dir, $file); + + if ( $f->{name} =~ m{(.*)/(.*)} ) { + $file = $2; + $dir = "$fio->{shareM}/" . $1; + } elsif ( $f->{name} eq "." 
) { + $dir = ""; + $file = $fio->{xfer}{shareName}; + } else { + $dir = $fio->{shareM}; + $file = $f->{name}; + } + + if ( !defined($fio->{attribLastDir}) || $fio->{attribLastDir} ne $dir ) { + # + # Flush any directories that don't match the first part + # of the new directory + # + foreach my $d ( keys(%{$fio->{attrib}}) ) { + next if ( $d eq "" || "$dir/" =~ m{^\Q$d/} ); + $fio->attribWrite($d); + } + $fio->{attribLastDir} = $dir; + } + if ( !exists($fio->{attrib}{$dir}) ) { + $fio->{attrib}{$dir} = BackupPC::Attrib->new({ + compress => $fio->{xfer}{compress}, + }); + my $path = $fio->{outDir} . $dir; + if ( -f $fio->{attrib}{$dir}->fileName($path) + && !$fio->{attrib}{$dir}->read($path) ) { + $fio->log(sprintf("Unable to read attribute file %s", + $fio->{attrib}{$dir}->fileName($path))); + } + } + $fio->log("attribSet(dir=$dir, file=$file)") if ( $fio->{logLevel} >= 4 ); + + $fio->{attrib}{$dir}->set($file, { + type => $fio->mode2type($f->{mode}), + mode => $f->{mode}, + uid => $f->{uid}, + gid => $f->{gid}, + size => $placeHolder ? -1 : $f->{size}, + mtime => $f->{mtime}, + }); + return; +} + +sub attribWrite +{ + my($fio, $d) = @_; + my($poolWrite); + + if ( !defined($d) ) { + # + # flush all entries (in reverse order) + # + foreach $d ( sort({$b cmp $a} keys(%{$fio->{attrib}})) ) { + $fio->attribWrite($d); + } + return; + } + return if ( !defined($fio->{attrib}{$d}) ); + # + # Set deleted files in the attributes. Any file in the view + # that doesn't have attributes is deleted. All files sent by + # rsync have attributes temporarily set so we can do deletion + # detection. We also prune these temporary attributes. 
+ # + if ( $d ne "" ) { + my $dir; + my $share; + + $dir = $1 if ( $d =~ m{.+?/(.*)} ); + $fio->viewCacheDir(undef, $dir); + ##print("attribWrite $d,$dir\n"); + ##$Data::Dumper::Indent = 1; + ##$fio->log("attribWrite $d,$dir"); + ##$fio->log("viewCacheLogKeys = ", keys(%{$fio->{viewCache}})); + ##$fio->log("attribKeys = ", keys(%{$fio->{attrib}})); + ##print "viewCache = ", Dumper($fio->{attrib}); + ##print "attrib = ", Dumper($fio->{attrib}); + if ( defined($fio->{viewCache}{$d}) ) { + foreach my $f ( keys(%{$fio->{viewCache}{$d}}) ) { + my $name = $f; + $name = "$1/$name" if ( $d =~ m{.*?/(.*)} ); + if ( defined(my $a = $fio->{attrib}{$d}->get($f)) ) { + # + # delete temporary attributes (skipped files) + # + if ( $a->{size} < 0 ) { + $fio->{attrib}{$d}->set($f, undef); + $fio->logFileAction("skip", { + %{$fio->{viewCache}{$d}{$f}}, + name => $name, + }) if ( $fio->{logLevel} >= 2 ); + } + } else { + ##print("Delete file $f\n"); + $fio->logFileAction("delete", { + %{$fio->{viewCache}{$d}{$f}}, + name => $name, + }) if ( $fio->{logLevel} >= 1 ); + $fio->{attrib}{$d}->set($f, { + type => BPC_FTYPE_DELETED, + mode => 0, + uid => 0, + gid => 0, + size => 0, + mtime => 0, + }); + } + } + } + } + if ( $fio->{attrib}{$d}->fileCount ) { + my $data = $fio->{attrib}{$d}->writeData; + my $dirM = $d; + + $dirM = $1 . "/" . 
$fio->{bpc}->fileNameMangle($2) + if ( $dirM =~ m{(.*?)/(.*)} ); + my $fileName = $fio->{attrib}{$d}->fileName("$fio->{outDir}$dirM"); + $fio->log("attribWrite(dir=$d) -> $fileName") + if ( $fio->{logLevel} >= 4 ); + my $poolWrite = BackupPC::PoolWrite->new($fio->{bpc}, $fileName, + length($data), $fio->{xfer}{compress}); + $poolWrite->write(\$data); + $fio->processClose($poolWrite, $fio->{attrib}{$d}->fileName($d), + length($data), 0); + } + delete($fio->{attrib}{$d}); +} + +sub processClose +{ + my($fio, $poolWrite, $fileName, $origSize, $doStats) = @_; + my($exists, $digest, $outSize, $errs) = $poolWrite->close; + + $fileName =~ s{^/+}{}; + $fio->log(@$errs) if ( defined($errs) && @$errs ); + if ( $doStats ) { + $fio->{stats}{TotalFileCnt}++; + $fio->{stats}{TotalFileSize} += $origSize; + } + if ( $exists ) { + if ( $doStats ) { + $fio->{stats}{ExistFileCnt}++; + $fio->{stats}{ExistFileSize} += $origSize; + $fio->{stats}{ExistFileCompSize} += $outSize; + } + } elsif ( $outSize > 0 ) { + my $fh = $fio->{newFilesFH}; + print($fh "$digest $origSize $fileName\n") if ( defined($fh) ); + } + return $exists && $origSize > 0; +} + +sub statsGet +{ + my($fio) = @_; + + return $fio->{stats}; +} + +# +# Make a given directory. Returns non-zero on error. +# +sub mkpath +{ + my($fio, $f) = @_; + my $name = $1 if ( $f->{name} =~ /(.*)/ ); + my $path; + + if ( $name eq "." ) { + $path = $fio->{outDirSh}; + } else { + $path = $fio->{outDirSh} . $fio->{bpc}->fileNameMangle($name); + } + $fio->logFileAction("create", $f) if ( $fio->{logLevel} >= 1 ); + $fio->log("mkpath($path, 0777)") if ( $fio->{logLevel} >= 5 ); + $path = $1 if ( $path =~ /(.*)/ ); + File::Path::mkpath($path, 0, 0777) if ( !-d $path ); + return $fio->attribSet($f) if ( -d $path ); + $fio->log("Can't create directory $path"); + return -1; +} + +# +# Make a special file. Returns non-zero on error. 
+# +sub mkspecial +{ + my($fio, $f) = @_; + my $name = $1 if ( $f->{name} =~ /(.*)/ ); + my $fNameM = $fio->{bpc}->fileNameMangle($name); + my $path = $fio->{outDirSh} . $fNameM; + my $attr = $fio->attribGet($f); + my $str = ""; + my $type = $fio->mode2type($f->{mode}); + + $fio->log("mkspecial($path, $type, $f->{mode})") + if ( $fio->{logLevel} >= 5 ); + if ( $type == BPC_FTYPE_CHARDEV || $type == BPC_FTYPE_BLOCKDEV ) { + my($major, $minor, $fh, $fileData); + + $major = $f->{rdev} >> 8; + $minor = $f->{rdev} & 0xff; + $str = "$major,$minor"; + } elsif ( ($f->{mode} & S_IFMT) == S_IFLNK ) { + $str = $f->{link}; + } + # + # Now see if the file is different, or this is a full, in which + # case we create the new file. + # + my($fh, $fileData); + if ( $fio->{full} + || !defined($attr) + || $attr->{type} != $fio->mode2type($f->{mode}) + || $attr->{mtime} != $f->{mtime} + || $attr->{size} != $f->{size} + || $attr->{gid} != $f->{gid} + || $attr->{mode} != $f->{mode} + || !defined($fh = BackupPC::FileZIO->open($attr->{fullPath}, 0, + $attr->{compress})) + || $fh->read(\$fileData, length($str) + 1) != length($str) + || $fileData ne $str ) { + $fh->close if ( defined($fh) ); + $fh = BackupPC::PoolWrite->new($fio->{bpc}, $path, + length($str), $fio->{xfer}{compress}); + $fh->write(\$str); + my $exist = $fio->processClose($fh, "$fio->{shareM}/$fNameM", + length($str), 1); + $fio->logFileAction($exist ? 
"pool" : "create", $f) + if ( $fio->{logLevel} >= 1 ); + return $fio->attribSet($f); + } else { + $fio->logFileAction("skip", $f) if ( $fio->{logLevel} >= 2 ); + } + $fh->close if ( defined($fh) ); +} + +sub unlink +{ + my($fio, $path) = @_; + + $fio->log("Unexpected call BackupPC::Xfer::RsyncFileIO->unlink($path)"); +} + +# +# Appends to list of log messages +# +sub log +{ + my($fio, @msg) = @_; + + $fio->{log} ||= []; + push(@{$fio->{log}}, @msg); +} + +# +# Generate a log file message for a completed file +# +sub logFileAction +{ + my($fio, $action, $f) = @_; + my $owner = "$f->{uid}/$f->{gid}"; + my $type = (("", "p", "c", "", "d", "", "b", "", "", "", "l", "", "s")) + [($f->{mode} & S_IFMT) >> 12]; + + $fio->log(sprintf(" %-6s %1s%4o %9s %11.0f %s", + $action, + $type, + $f->{mode} & 07777, + $owner, + $f->{size}, + $f->{name})); +} + +# +# Returns a list of log messages +# +sub logMsg +{ + my($fio) = @_; + my $log = $fio->{log} || []; + + delete($fio->{log}); + return @$log; +} + +# +# Start receive of file deltas for a particular file. +# +sub fileDeltaRxStart +{ + my($fio, $f, $cnt, $size, $remainder) = @_; + + $fio->{rxFile} = $f; # remote file attributes + $fio->{rxLocalAttr} = $fio->attribGet($f); # local file attributes + $fio->{rxBlkCnt} = $cnt; # how many blocks we will receive + $fio->{rxBlkSize} = $size; # block size + $fio->{rxRemainder} = $remainder; # size of the last block + $fio->{rxMatchBlk} = 0; # current start of match + $fio->{rxMatchNext} = 0; # current next block of match + my $rxSize = ($cnt - 1) * $size + $remainder; + if ( $fio->{rxFile}{size} != $rxSize ) { + $fio->{rxMatchBlk} = undef; # size different, so no file match + $fio->log("$fio->{rxFile}{name}: size doesn't match" + . " ($fio->{rxFile}{size} vs $rxSize)") + if ( $fio->{logLevel} >= 5 ); + } + delete($fio->{rxInFd}); + delete($fio->{rxOutFd}); + delete($fio->{rxDigest}); + delete($fio->{rxInData}); +} + +# +# Process the next file delta for the current file. 
Returns 0 if ok, +# -1 if not. Must be called with either a block number, $blk, or new data, +# $newData, (not both) defined. +# +sub fileDeltaRxNext +{ + my($fio, $blk, $newData) = @_; + + if ( defined($blk) ) { + if ( defined($fio->{rxMatchBlk}) && $fio->{rxMatchNext} == $blk ) { + # + # got the next block in order; just keep track. + # + $fio->{rxMatchNext}++; + return; + } + } + my $newDataLen = length($newData); + $fio->log("$fio->{rxFile}{name}: blk=$blk, newData=$newDataLen, rxMatchBlk=$fio->{rxMatchBlk}, rxMatchNext=$fio->{rxMatchNext}") + if ( $fio->{logLevel} >= 8 ); + if ( !defined($fio->{rxOutFd}) ) { + # + # maybe the file has no changes + # + if ( $fio->{rxMatchNext} == $fio->{rxBlkCnt} + && !defined($blk) && !defined($newData) ) { + #$fio->log("$fio->{rxFile}{name}: file is unchanged"); + # if ( $fio->{logLevel} >= 8 ); + return; + } + + # + # need to open an output file where we will build the + # new version. + # + $fio->{rxFile}{name} =~ /(.*)/; + my $rxOutFileRel = "$fio->{shareM}/" . $fio->{bpc}->fileNameMangle($1); + my $rxOutFile = $fio->{outDir} . $rxOutFileRel; + $fio->{rxOutFd} = BackupPC::PoolWrite->new($fio->{bpc}, + $rxOutFile, $fio->{rxFile}{size}, + $fio->{xfer}{compress}); + $fio->log("$fio->{rxFile}{name}: opening output file $rxOutFile") + if ( $fio->{logLevel} >= 10 ); + $fio->{rxOutFile} = $rxOutFile; + $fio->{rxOutFileRel} = $rxOutFileRel; + $fio->{rxDigest} = File::RsyncP::Digest->new; + $fio->{rxDigest}->add(pack("V", $fio->{checksumSeed})); + } + if ( defined($fio->{rxMatchBlk}) + && $fio->{rxMatchBlk} != $fio->{rxMatchNext} ) { + # + # Need to copy the sequence of blocks that matched. If the file + # is compressed we need to make a copy of the uncompressed file, + # since the compressed file is not seekable. 
Future optimizations + # would be to keep the uncompressed file in memory (eg, up to say + # 10MB), only create an uncompressed copy if the matching + # blocks were not monotonic, and to only do this if there are + # matching blocks (eg, maybe the entire file is new). + # + my $attr = $fio->{rxLocalAttr}; + my $fh; + if ( !defined($fio->{rxInFd}) && !defined($fio->{rxInData}) ) { + if ( $attr->{compress} ) { + if ( !defined($fh = BackupPC::FileZIO->open( + $attr->{fullPath}, + 0, + $attr->{compress})) ) { + $fio->log("Can't open $attr->{fullPath}"); + return -1; + } + if ( $attr->{size} < 10 * 1024 * 1024 ) { + # + # Cache the entire old file if it is less than 10MB + # + my $data; + $fio->{rxInData} = ""; + while ( $fh->read(\$data, 10 * 1024 * 1024) > 0 ) { + $fio->{rxInData} .= $data; + } + } else { + # + # Create and write a temporary output file + # + unlink("$fio->{outDirSh}RStmp") + if ( -f "$fio->{outDirSh}RStmp" ); + if ( open(F, ">+$fio->{outDirSh}RStmp") ) { + my $data; + while ( $fh->read(\$data, 1024 * 1024) > 0 ) { + if ( syswrite(F, $data) != length($data) ) { + $fio->log(sprintf("Can't write len=%d to %s", + length($data) , "$fio->{outDirSh}RStmp")); + $fh->close; + return -1; + } + } + $fio->{rxInFd} = *F; + $fio->{rxInName} = "$fio->{outDirSh}RStmp"; + seek($fio->{rxInFd}, 0, 0); + } else { + $fio->log("Unable to open $fio->{outDirSh}RStmp"); + $fh->close; + return -1; + } + } + $fh->close; + } else { + if ( open(F, $attr->{fullPath}) ) { + $fio->{rxInFd} = *F; + $fio->{rxInName} = $attr->{fullPath}; + } else { + $fio->log("Unable to open $attr->{fullPath}"); + return -1; + } + } + } + my $lastBlk = $fio->{rxMatchNext} - 1; + $fio->log("$fio->{rxFile}{name}: writing blocks $fio->{rxMatchBlk}.." + . 
"$lastBlk") + if ( $fio->{logLevel} >= 10 ); + my $seekPosn = $fio->{rxMatchBlk} * $fio->{rxBlkSize}; + if ( defined($fio->{rxInFd}) && !seek($fio->{rxInFd}, $seekPosn, 0) ) { + $fio->log("Unable to seek $attr->{fullPath} to $seekPosn"); + return -1; + } + my $cnt = $fio->{rxMatchNext} - $fio->{rxMatchBlk}; + my($thisCnt, $len, $data); + for ( my $i = 0 ; $i < $cnt ; $i += $thisCnt ) { + $thisCnt = $cnt - $i; + $thisCnt = 512 if ( $thisCnt > 512 ); + if ( $fio->{rxMatchBlk} + $i + $thisCnt == $fio->{rxBlkCnt} ) { + $len = ($thisCnt - 1) * $fio->{rxBlkSize} + $fio->{rxRemainder}; + } else { + $len = $thisCnt * $fio->{rxBlkSize}; + } + if ( defined($fio->{rxInData}) ) { + $data = substr($fio->{rxInData}, $seekPosn, $len); + } else { + if ( sysread($fio->{rxInFd}, $data, $len) != $len ) { + $fio->log("Unable to read $len bytes from" + . " $fio->{rxInName} " + . "($i,$thisCnt,$fio->{rxBlkCnt})"); + return -1; + } + } + $fio->{rxOutFd}->write(\$data); + $fio->{rxDigest}->add($data); + } + $fio->{rxMatchBlk} = undef; + } + if ( defined($blk) ) { + # + # Remember the new block number + # + $fio->{rxMatchBlk} = $blk; + $fio->{rxMatchNext} = $blk + 1; + } + if ( defined($newData) ) { + # + # Write the new chunk + # + my $len = length($newData); + $fio->log("$fio->{rxFile}{name}: writing $len bytes new data") + if ( $fio->{logLevel} >= 10 ); + $fio->{rxOutFd}->write(\$newData); + $fio->{rxDigest}->add($newData); + } +} + +# +# Finish up the current receive file. Returns undef if ok, -1 if not. +# Returns 1 if the md4 digest doesn't match. +# +sub fileDeltaRxDone +{ + my($fio, $md4) = @_; + my $name = $1 if ( $fio->{rxFile}{name} =~ /(.*)/ ); + + if ( !defined($fio->{rxDigest}) ) { + # + # File was exact match, but we still need to verify the + # MD4 checksum. Therefore open and read the file. 
+ # + $fio->{rxDigest} = File::RsyncP::Digest->new; + $fio->{rxDigest}->add(pack("V", $fio->{checksumSeed})); + my $attr = $fio->{rxLocalAttr}; + if ( defined($attr) && defined(my $fh = BackupPC::FileZIO->open( + $attr->{fullPath}, + 0, + $attr->{compress})) ) { + my $data; + while ( $fh->read(\$data, 4 * 65536) > 0 ) { + $fio->{rxDigest}->add($data); + } + $fh->close; + } else { + # error + } + $fio->log("$name got exact match") + if ( $fio->{logLevel} >= 5 ); + } + close($fio->{rxInFd}) if ( defined($fio->{rxInFd}) ); + unlink("$fio->{outDirSh}RStmp") if ( -f "$fio->{outDirSh}RStmp" ); + my $newDigest = $fio->{rxDigest}->rsyncDigest; + if ( $fio->{logLevel} >= 3 ) { + my $md4Str = unpack("H*", $md4); + my $newStr = unpack("H*", $newDigest); + $fio->log("$name got digests $md4Str vs $newStr") + } + if ( $md4 ne $newDigest ) { + $fio->log("$name md4 doesn't match") + if ( $fio->{logLevel} >= 1 ); + if ( defined($fio->{rxOutFd}) ) { + $fio->{rxOutFd}->close; + unlink($fio->{rxOutFile}); + } + return 1; + } + # + # One special case is an empty file: if the file size is + # zero we need to open the output file to create it. + # + if ( $fio->{rxFile}{size} == 0 ) { + my $rxOutFileRel = "$fio->{shareM}/" + . $fio->{bpc}->fileNameMangle($name); + my $rxOutFile = $fio->{outDir} . $rxOutFileRel; + $fio->{rxOutFd} = BackupPC::PoolWrite->new($fio->{bpc}, + $rxOutFile, $fio->{rxFile}{size}, + $fio->{xfer}{compress}); + } + if ( !defined($fio->{rxOutFd}) ) { + # + # No output file, meaning original was an exact match. 
+ # + $fio->log("$name: nothing to do") + if ( $fio->{logLevel} >= 5 ); + my $attr = $fio->{rxLocalAttr}; + my $f = $fio->{rxFile}; + $fio->logFileAction("same", $f) if ( $fio->{logLevel} >= 1 ); + if ( $fio->{full} + || $attr->{type} != $f->{type} + || $attr->{mtime} != $f->{mtime} + || $attr->{size} != $f->{size} + || $attr->{gid} != $f->{gid} + || $attr->{mode} != $f->{mode} ) { + # + # In the full case, or if the attributes are different, + # we need to make a link from the previous file and + # set the attributes. + # + my $rxOutFile = $fio->{outDirSh} + . $fio->{bpc}->fileNameMangle($name); + if ( !link($attr->{fullPath}, $rxOutFile) ) { + $fio->log("Unable to link $attr->{fullPath} to $rxOutFile"); + return -1; + } + # + # Cumulate the stats + # + $fio->{stats}{TotalFileCnt}++; + $fio->{stats}{TotalFileSize} += $fio->{rxFile}{size}; + $fio->{stats}{ExistFileCnt}++; + $fio->{stats}{ExistFileSize} += $fio->{rxFile}{size}; + $fio->{stats}{ExistFileCompSize} += -s $rxOutFile; + return; + } + } + if ( defined($fio->{rxOutFd}) ) { + my $exist = $fio->processClose($fio->{rxOutFd}, + $fio->{rxOutFileRel}, + $fio->{rxFile}{size}, 1); + $fio->logFileAction($exist ? 
"pool" : "create", $fio->{rxFile}) + if ( $fio->{logLevel} >= 1 ); + } + delete($fio->{rxDigest}); + delete($fio->{rxInData}); + return; +} + +sub fileListEltSend +{ + my($fio, $name, $fList, $outputFunc) = @_; + my @s = stat($name); + + (my $n = $name) =~ s/^\Q$fio->{localDir}/$fio->{remoteDir}/; + $fList->encode({ + fname => $n, + dev => $s[0], + inode => $s[1], + mode => $s[2], + uid => $s[4], + gid => $s[5], + rdev => $s[6], + mtime => $s[9], + }); + &$outputFunc($fList->encodeData); +} + +sub fileListSend +{ + my($fio, $flist, $outputFunc) = @_; + + $fio->log("fileListSend not implemented!!"); + $fio->{view}->find($fio->{lastBkupNum}, $fio->{xfer}{shareName}, + $fio->{restoreFiles}, 1, \&fileListEltSend, + $flist, $outputFunc); +} + +sub finish +{ + my($fio, $isChild) = @_; + + # + # Flush the attributes if this is the child + # + $fio->attribWrite(undef) +} + + +sub is_tainted +{ + return ! eval { + join('',@_), kill 0; + 1; + }; +} + +1; diff --git a/lib/BackupPC/Xfer/Smb.pm b/lib/BackupPC/Xfer/Smb.pm index 5bf0619..06c1e4d 100644 --- a/lib/BackupPC/Xfer/Smb.pm +++ b/lib/BackupPC/Xfer/Smb.pm @@ -29,7 +29,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. 
# @@ -43,6 +43,7 @@ sub new { my($class, $bpc, $args) = @_; + $args ||= {}; my $t = bless { bpc => $bpc, conf => { $bpc->Conf }, @@ -58,6 +59,20 @@ sub new return $t; } +sub args +{ + my($t, $args) = @_; + + foreach my $arg ( keys(%$args) ) { + $t->{$arg} = $args->{$arg}; + } +} + +sub useTar +{ + return 1; +} + sub start { my($t) = @_; @@ -70,12 +85,17 @@ sub start # # First propagate the PASSWD setting # + $ENV{PASSWD} = $ENV{BPC_SMB_PASSWD} if ( defined($ENV{BPC_SMB_PASSWD}) ); $ENV{PASSWD} = $conf->{SmbSharePasswd} if ( defined($conf->{SmbSharePasswd}) ); if ( !defined($ENV{PASSWD}) ) { $t->{_errStr} = "passwd not set for smbclient"; return; } + if ( !defined($conf->{SmbClientPath}) || !-x $conf->{SmbClientPath} ) { + $t->{_errStr} = '$Conf{SmbClientPath} is not a valid executable'; + return; + } if ( $t->{type} eq "restore" ) { $smbClientCmd = "$conf->{SmbClientPath} '\\\\$t->{host}\\$t->{shareName}'" @@ -234,7 +254,10 @@ sub readOutput } elsif ( /^code 0 listing / || /^code 0 opening / || /^abandoning restore/i - || /^Error: Looping in FIND_NEXT/i ) { + || /^Error: Looping in FIND_NEXT/i + || /^SUCCESS - 0/i + || /^Call timed out: server did not respond/i + ) { $t->{hostError} ||= $_; } elsif ( /smb: \\>/ || /^added interface/i diff --git a/lib/BackupPC/Xfer/Tar.pm b/lib/BackupPC/Xfer/Tar.pm index 6d313d4..7092b89 100644 --- a/lib/BackupPC/Xfer/Tar.pm +++ b/lib/BackupPC/Xfer/Tar.pm @@ -29,7 +29,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. 
# @@ -43,6 +43,7 @@ sub new { my($class, $bpc, $args) = @_; + $args ||= {}; my $t = bless { bpc => $bpc, conf => { $bpc->Conf }, @@ -58,6 +59,20 @@ sub new return $t; } +sub args +{ + my($t, $args) = @_; + + foreach my $arg ( keys(%$args) ) { + $t->{$arg} = $args->{$arg}; + } +} + +sub useTar +{ + return 1; +} + sub start { my($t) = @_; diff --git a/lib/BackupPC/Zip/FileMember.pm b/lib/BackupPC/Zip/FileMember.pm index 39a1869..d9892fe 100644 --- a/lib/BackupPC/Zip/FileMember.pm +++ b/lib/BackupPC/Zip/FileMember.pm @@ -33,7 +33,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # diff --git a/makeDist b/makeDist index 43c6c91..0d236f7 100755 --- a/makeDist +++ b/makeDist @@ -9,8 +9,8 @@ use File::Copy; umask(0022); -my $Version = "1.5.0_CVS"; -my $ReleaseDate = "17 Sep 2002"; +my $Version = "1.6.0_CVS"; +my $ReleaseDate = "10 Dec 2002"; my $DistDir = "dist/BackupPC-$Version"; my @PerlSrc = qw( @@ -33,8 +33,11 @@ my @PerlSrc = qw( lib/BackupPC/Lang/en.pm lib/BackupPC/Lang/fr.pm lib/BackupPC/PoolWrite.pm + lib/BackupPC/View.pm lib/BackupPC/Xfer/Smb.pm lib/BackupPC/Xfer/Tar.pm + lib/BackupPC/Xfer/Rsync.pm + lib/BackupPC/Xfer/RsyncFileIO.pm lib/BackupPC/Zip/FileMember.pm cgi-bin/BackupPC_Admin ); @@ -43,15 +46,18 @@ my @PerlSrc = qw( # Check config parameters # my $ConfVars = {}; -CheckConfigParams("conf/config.pl", $ConfVars, 0); +my $errCnt; + +$errCnt += CheckConfigParams("conf/config.pl", $ConfVars, 0); $ConfVars->{BackupPCUser} = 2; $ConfVars->{CgiDir} = 2; $ConfVars->{InstallDir} = 2; $ConfVars->{CgiImageDir} = 2; foreach my $file ( @PerlSrc ) { - CheckConfigParams($file, $ConfVars, 1); + $errCnt += CheckConfigParams($file, $ConfVars, 1); } -my $errCnt; +exit(1) if ( $errCnt ); + foreach my $var ( sort(keys(%$ConfVars) ) ) { next if ( $ConfVars->{$var} >= 2 || $var =~ /^\$/ ); printf("Unused config parameter 
$var\n"); @@ -100,6 +106,7 @@ foreach my $file ( (@PerlSrc, } rmtree("doc", 0, 0); system("cd dist ; tar zcf BackupPC-$Version.tar.gz BackupPC-$Version"); +print("Distribution written to dist/BackupPC-$Version.tar.gz\n"); ########################################################################### # Subroutines @@ -135,9 +142,9 @@ sub InstallFile } elsif ( $file =~ /Lib.pm/ && /(.*Version *=> .*)'[\w\d\.]+',/ ) { print OUT "$1'$Version',\n"; } elsif ( $file =~ /Lib.pm/ && /(.*BinDir *=> .*)'.*',/ ) { - print OUT "$1'__INSTALLDIR__/bin',\n"; + print OUT "$1'__INSTALLDIR__',\n"; } elsif ( $file =~ /Lib.pm/ && /(.*LibDir *=> .*)'.*',/ ) { - print OUT "$1'__INSTALLDIR__/lib',\n"; + print OUT "$1'__INSTALLDIR__',\n"; } else { print OUT; } @@ -211,19 +218,20 @@ sub config2pod sub CheckConfigParams { my($file, $vars, $check) = @_; + my $errors; open(F, $file) || die("can't open $file\n"); if ( $check ) { while ( ) { s/\$self->{Conf}{([^}\$]+)}/if ( !defined($vars->{$1}) ) { print("Unexpected Conf var $1 in $file\n"); - exit(1); + $errors++; } else { $vars->{$1}++; }/eg; s/\$[Cc]onf(?:->)?{([^}\$]+)}/if ( !defined($vars->{$1}) ) { print("Unexpected Conf var $1 in $file\n"); - exit(1); + $errors++; } else { $vars->{$1}++; }/eg; @@ -235,4 +243,5 @@ sub CheckConfigParams } } close(F); + return $errors; } -- 2.20.1
    EOF diff --git a/conf/config.pl b/conf/config.pl index 1c0e20d..313ef51 100644 --- a/conf/config.pl +++ b/conf/config.pl @@ -622,6 +622,75 @@ $Conf{TarClientRestoreCmd} = '$sshPath -q -l root $host' # $Conf{TarClientPath} = '/bin/tar'; +# +# Path to rsync executable on the client +# +$Conf{RsyncClientPath} = '/bin/rsync'; + +# +# Full command to run rsync on the client machine +# +$Conf{RsyncClientCmd} = '$sshPath -q -l root $host $rsyncPath $argList'; + +# +# Full command to run rsync for restore on the client. +# +## $Conf{RsyncClientRestoreCmd} = ''; + +# +# Share name to backup. For $Conf{XferMethod} = "rsync" this should +# be a directory name, eg '/' or '/home'. For $Conf{XferMethod} = "rsyncd" +# this should be the name of the module to backup (ie: the name from +# /etc/rsynd.conf). +# +$Conf{RsyncShareName} = '/'; + +# +# Rsync daemon port on the client, for $Conf{XferMethod} = "rsyncd". +# +$Conf{RsyncdClientPort} = 873; + +# +# Key arguments to rsync server. Do not edit these unless you +# have a very thorough understanding of how File::RsyncP works. +# Really, do not edit these. See $Conf{RsyncClientArgs} instead. +# +$Conf{RsyncArgs} = [ + # + # Do not edit these! + # + '--numeric-ids', + '--perms', + '--owner', + '--group', + '--devices', + '--links', + '--block-size=2048', + '--relative', + '--recursive', +]; + +# +# Additional Rsync arguments that are given to the remote (client) +# rsync. Unfortunately you need a pretty good understanding of +# File::RsyncP to know which arguments will work; not all will. +# Examples that should work are --exclude/--include, eg: +# +# $Conf{RsyncClientArgs} = [ +# '--exclude', '*.tmp', +# ]; +# +$Conf{RsyncClientArgs} = [ +]; + +# +# Amount of verbosity in Rsync Xfer log files. 0 means be quiet, +# 1 will give some general information, 2 will give one line per file, +# 3 will include skipped files, higher values give more output. 
+# 10 will include byte dumps of all data read/written, which will +# make the log files huge. +# +$Conf{RsyncLogLevel} = 2; # # Full path for ssh. Security caution: normal users should not # allowed to write to this file or directory. @@ -978,11 +1047,13 @@ $Conf{CgiHeaderFontSize} = '3'; # # Color scheme for CGI interface. Default values give a very light blue -# for the background navigation color and green for the header background. -# (You call tell I'm a better programmer than graphical designer.) +# for the background navigation color, green for the header background, +# and white for the body background. (You call tell I should stick to +# programming and not graphical design.) # $Conf{CgiNavBarBgColor} = '#ddeeee'; $Conf{CgiHeaderBgColor} = '#99cc33'; +$Conf{CgiBodyBgColor} = '#ffffff'; # # Additional CGI header text. For example, if you wanted each CGI page @@ -1002,6 +1073,18 @@ $Conf{CgiHeaders} = ''; # $Conf{CgiImageDir} = ''; +# +# Additional mappings of file name extenions to Content-Type for +# individual file restore. See $Ext2ContentType in BackupPC_Admin +# for the default setting. You can add additional settings here, +# or override any default settings. Example: +# +# $Conf{CgiExt2ContentType} = { +# 'pl' => 'text/plain', +# }; +# +$Conf{CgiExt2ContentType} = { }; + # # URL (without the leading http://host) for BackupPC's image directory. # The CGI script uses this value to serve up image files. 
diff --git a/configure.pl b/configure.pl index 8b7afed..b7a427a 100755 --- a/configure.pl +++ b/configure.pl @@ -90,7 +90,7 @@ while ( 1 ) { last if ( $ConfigPath eq "" || ($ConfigPath =~ /^\// && -r $ConfigPath && -w $ConfigPath) ); my $problem = "is not an absolute path"; - $problem = "is no writable" if ( !-w $ConfigPath ); + $problem = "is not writable" if ( !-w $ConfigPath ); $problem = "is not readable" if ( !-r $ConfigPath ); $problem = "doesn't exist" if ( !-f $ConfigPath ); print("The file '$ConfigPath' $problem.\n"); @@ -99,7 +99,7 @@ my $bpc; if ( $ConfigPath ne "" && -r $ConfigPath ) { (my $topDir = $ConfigPath) =~ s{/[^/]+/[^/]+$}{}; die("BackupPC::Lib->new failed\n") - if ( !($bpc = BackupPC::Lib->new($topDir)) ); + if ( !($bpc = BackupPC::Lib->new($topDir, ".")) ); %Conf = $bpc->Conf(); %OrigConf = %Conf; $Conf{TopDir} = $topDir; @@ -124,6 +124,7 @@ my %Programs = ( 'gtar/tar' => "TarClientPath", smbclient => "SmbClientPath", nmblookup => "NmbLookupPath", + rsync => "RsyncClientPath", ping => "PingPath", df => "DfPath", 'ssh2/ssh' => "SshPath", @@ -327,6 +328,8 @@ needs to know the URL to access these images. Example: Apache image directory: /usr/local/apache/htdocs/BackupPC URL for image directory: /BackupPC +The URL for the image directory should start with a slash. 
+ EOF while ( 1 ) { $Conf{CgiImageDir} = prompt("--> Apache image directory (full path)", @@ -334,9 +337,9 @@ EOF last if ( $Conf{CgiImageDir} =~ /^\// ); } while ( 1 ) { - $Conf{CgiImageDirURL} = prompt("--> URL for image directory (omit http://host)", + $Conf{CgiImageDirURL} = prompt("--> URL for image directory (omit http://host; starts with '/')", $Conf{CgiImageDirURL}); - last if ( $Conf{CgiImageDirURL} ne "" ); + last if ( $Conf{CgiImageDirURL} =~ /^\// ); } } @@ -409,9 +412,10 @@ unlink("$Conf{InstallDir}/bin/BackupPC_queueAll"); printf("Installing library in $Conf{InstallDir}/lib\n"); foreach my $lib ( qw(BackupPC/Lib.pm BackupPC/FileZIO.pm BackupPC/Attrib.pm - BackupPC/PoolWrite.pm BackupPC/Xfer/Tar.pm BackupPC/Xfer/Smb.pm - BackupPC/Zip/FileMember.pm - BackupPC/Lang/en.pm BackupPC/Lang/fr.pm + BackupPC/PoolWrite.pm BackupPC/View.pm BackupPC/Xfer/Tar.pm + BackupPC/Xfer/Smb.pm BackupPC/Xfer/Rsync.pm + BackupPC/Xfer/RsyncFileIO.pm BackupPC/Zip/FileMember.pm + BackupPC/Lang/en.pm BackupPC/Lang/fr.pm ) ) { InstallFile("lib/$lib", "$Conf{InstallDir}/lib/$lib", 0444); } @@ -461,13 +465,14 @@ $Conf{IncrFill} = 0; # if ( $^O eq "solaris" || $^O eq "sunos" ) { $Conf{PingArgs} ||= '-s $host 56 1'; -} elsif ( $^O eq "linux" || $^O eq "openbsd" || $^O eq "netbsd" ) { +} elsif ( ($^O eq "linux" || $^O eq "openbsd" || $^O eq "netbsd") + && !system("$Conf{PingClientPath} -c 1 -w 3 localhost") ) { $Conf{PingArgs} ||= '-c 1 -w 3 $host'; } else { $Conf{PingArgs} ||= '-c 1 $host'; } -my $confCopy = "$dest.pre-1.4.0.b1"; +my $confCopy = "$dest.pre-__VERSION__"; if ( -f $dest && !-f $confCopy ) { # # Make copy of config file, preserving ownership and modes diff --git a/doc-src/BackupPC.pod b/doc-src/BackupPC.pod index 8958782..a7dd0e2 100644 --- a/doc-src/BackupPC.pod +++ b/doc-src/BackupPC.pod @@ -42,8 +42,10 @@ cancel backups and browse and restore files from backups. No client-side software is needed. On WinXX the standard smb protocol is used to extract backup data. 
On linux or unix clients, -tar over ssh/rsh/nfs is used to extract backup data (or alternatively -Samba can be installed on the linux or unix client to provide smb shares). +rsync or tar (over ssh/rsh/nfs) is used to extract backup data. +Alternatively, rsync can also be used on WinXX (using cygwin), +and Samba could be installed on the linux or unix client to +provide smb shares). =item * @@ -51,7 +53,7 @@ Flexible restore options. Single files can be downloaded from any backup directly from the CGI interface. Zip or Tar archives for selected files or directories from any backup can also be downloaded from the CGI interface. Finally, direct restore to -the client machine (using SMB or tar) for selected files or +the client machine (using smb or tar) for selected files or directories is also supported from the CGI interface. =item * @@ -97,12 +99,18 @@ a smaller number of very old full backups. =item Incremental Backup An incremental backup is a backup of files that have changed (based on their -modification time) since the last successful full backup. To be safe, -BackupPC backups all files that have changed since one hour prior to the -start of the last successful full backup. BackupPC can also be configured -to keep a certain number of incremental backups, and to keep a smaller -number of very old incremental backups. (BackupPC does not support -multi-level incremental backups, although it would be easy to do so.) +modification time) since the last successful full backup. For SMB and +tar, BackupPC backups all files that have changed since one hour prior to the +start of the last successful full backup. Rsync is more clever: any files +who attributes have changed (ie: uid, gid, mtime, modes, size) since the +last full are backed up. Deleted and new files are also detected by +Rsync incrementals (SMB and tar are not able to detect deleted files or +new files whose modification time is prior to the last full dump. 
+ +BackupPC can also be configured to keep a certain number of incremental +backups, and to keep a smaller number of very old incremental backups. +(BackupPC does not support multi-level incremental backups, although it +would be easy to do so.) BackupPC's CGI interface "fills-in" incremental backups based on the last full backup, giving every backup a "full" appearance. This makes @@ -200,7 +208,13 @@ Do not send subscription requests to this address! =item Other Programs of Interest If you want to mirror linux or unix files or directories to a remote server -you should look at rsync, L. +you should consider rsync, L. BackupPC uses +rsync as a transport mechanism; if you are already an rsync user you +can think of BackupPC as adding efficient storage (compression and +pooling) and a convenient user interface to rsync. + +Unison is a utility that can do two-way, interactive, synchronization. +See L. Two popular open source packages that do tape backup are Amanda (L) and @@ -272,10 +286,11 @@ appreciated. Even negative feedback is helpful, for example "We evaluated BackupPC but didn't use it because it doesn't ...". Beyond being a satisfied user and telling other people about it, everyone -is encouraged to add links to backuppc.sourceforge.net (I'll see then -via Google) or otherwise publicize BackupPC. Unlike the commercial -products in this space, I have a zero budget (in both time and money) -for marketing, PR and advertising, so it's up to all of you! +is encouraged to add links to L (I'll +see then via Google) or otherwise publicize BackupPC. Unlike the +commercial products in this space, I have a zero budget (in both +time and money) for marketing, PR and advertising, so it's up to +all of you! Also, everyone is encouraged to contribute patches, bug reports, feature and design suggestions, code, and documentation corrections or @@ -319,10 +334,14 @@ L. =item * -Perl modules Compress::Zlib and Archive::Zip. 
Try "perldoc Compress::Zlib" -and "perldoc Archive::Zip" to see if you have these modules. If not, -fetch them from L and see the instructions below -for how to build and install them. +Perl modules Compress::Zlib, Archive::Zip and Rsync. Try "perldoc +Compress::Zlib" and "perldoc Archive::Zip" to see if you have these +modules. If not, fetch them from L and see the +instructions below for how to build and install them. + +The Rsync module is available from L. +You'll need to install the Rsync module if you want to use Rsync as +a transport method. =item * @@ -348,6 +367,15 @@ As of July 2002 the latest versons is 1.13.25. =item * +If you are using rsync to backup linux/unix machines you should have +version 2.5.5 on each client machine. See L. +Use "rsync --version" to check your version. + +For BackupPC to use Rsync you will also need to install the perl Rsync +module, which is available from L. + +=item * + The Apache web server, see L, preferably built with mod_perl support. @@ -404,10 +432,11 @@ Download the latest version from L. First off, to enable compression, you will need to install Compress::Zlib from L. It is optional, but strongly recommended. -Also, to support restore via Zip archives you will need to install +To support restore via Zip archives you will need to install Archive::Zip, also from L. You can run "perldoc Compress::Zlib" to see if this module is installed. -To build and install these packages you should run these commands: +Finally, you will need the Rsync module. To build and install these +packages you should run these commands: tar zxvf Archive-Zip-1.01.tar.gz cd Archive-Zip-1.01 @@ -486,13 +515,18 @@ share password: =item * -As an environment variable PASSWD set before BackupPC starts. If you -start BackupPC manually this PASSWD must be set manually first. +As an environment variable BPC_SMB_PASSWD set before BackupPC starts. +If you start BackupPC manually the BPC_SMB_PASSWD variable must be set +manually first. 
For backward compatability for v1.5.0 and prior, the +environment variable PASSWD can be used if BPC_SMB_PASSWD is not set. +Warning: on some systems it is possible to see environment variables of +running processes. =item * -Alternatively the PASSWD setting can be included in /etc/init.d/backuppc, -in which case you must make sure this file is not world (other) readable. +Alternatively the BPC_SMB_PASSWD setting can be included in +/etc/init.d/backuppc, in which case you must make sure this file +is not world (other) readable. =item * @@ -2108,11 +2142,15 @@ This is especially troublesome for Outlook, which stores all its data in a single large file and keeps it locked whenever it is running. Since many users keep Outlook running all the time their machine is up their Outlook file will not be backed up. Sadly, this file -is the most important file to backup. How do commercial backup -systems solve this problem? +is the most important file to backup. As one workaround, Microsoft has +a user-level application that periodically asks the user if they want to +make a copy of their outlook.pst file. This copy can then be backed up +by BackupPC. See L. Similarly, all of the data for WinXX services like SQL databases, -Exchange etc won't be backed up. +Exchange etc won't be backed up. If these applications support +some kind of export or utility to save their data to disk then this +can =used to create files that BackupPC can backup. So far, the best that BackupPC can do is send warning emails to the user saying that their outlook files haven't been backed up in diff --git a/lib/BackupPC/Attrib.pm b/lib/BackupPC/Attrib.pm index f5c497e..b5159a3 100644 --- a/lib/BackupPC/Attrib.pm +++ b/lib/BackupPC/Attrib.pm @@ -30,7 +30,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. 
# @@ -59,6 +59,7 @@ use constant BPC_FTYPE_DIR => 5; use constant BPC_FTYPE_FIFO => 6; use constant BPC_FTYPE_SOCKET => 8; use constant BPC_FTYPE_UNKNOWN => 9; +use constant BPC_FTYPE_DELETED => 10; my @FILE_TYPES = qw( BPC_FTYPE_FILE @@ -70,6 +71,7 @@ my @FILE_TYPES = qw( BPC_FTYPE_FIFO BPC_FTYPE_SOCKET BPC_FTYPE_UNKNOWN + BPC_FTYPE_DELETED ); # @@ -85,6 +87,8 @@ my @FileType2Text = ( "fifo", "?", "socket", + "?", + "deleted", ); # @@ -139,13 +143,18 @@ sub set { my($a, $fileName, $attrib) = @_; - $a->{files}{$fileName} = $attrib; + if ( !defined($attrib) ) { + delete($a->{files}{$fileName}); + } else { + $a->{files}{$fileName} = $attrib; + } } sub get { my($a, $fileName) = @_; - return $a->{files}{$fileName}; + return $a->{files}{$fileName} if ( defined($fileName) ); + return $a->{files}; } sub fileType2Text diff --git a/lib/BackupPC/FileZIO.pm b/lib/BackupPC/FileZIO.pm index 144415c..05f28e6 100644 --- a/lib/BackupPC/FileZIO.pm +++ b/lib/BackupPC/FileZIO.pm @@ -29,7 +29,7 @@ # #======================================================================== # -# Version 1.5.0, released 2 Aug 2002. +# Version 1.6.0_CVS, released 10 Dec 2002. # # See http://backuppc.sourceforge.net. # diff --git a/lib/BackupPC/Lang/en.pm b/lib/BackupPC/Lang/en.pm index a289cc6..6b911d2 100644 --- a/lib/BackupPC/Lang/en.pm +++ b/lib/BackupPC/Lang/en.pm @@ -223,7 +223,7 @@ EOF # -------------------------------- $Lang{Backup_PC__Log_File__file} = "BackupPC: Log File \$file"; $Lang{Log_File__file__comment} = < EOF # -------------------------------- @@ -580,6 +580,7 @@ $Lang{Backup_browse_for__host} = < +
    @@ -675,15 +676,13 @@ $Lang{Can_t_browse_bad_directory_name} = "Can\'t browse bad directory name" $Lang{Directory___EscapeHTML} = "Directory \${EscapeHTML(\"\$TopDir/pc/\$host/\$num\")}" . " is empty"; $Lang{Can_t_browse_bad_directory_name2} = "Can\'t browse bad directory name" - . " \${EscapeHTML(\$fullDir)}"; + . " \${EscapeHTML(\$relDir)}"; $Lang{Only_privileged_users_can_restore_backup_files} = "Only privileged users can restore backup files" . " for host \${EscapeHTML(\$In{host})}."; $Lang{Bad_host_name} = "Bad host name \${EscapeHTML(\$host)}"; $Lang{You_haven_t_selected_any_files__please_go_Back_to} = "You haven\'t selected any files; please go Back to" . " select some files."; $Lang{Nice_try__but_you_can_t_put} = "Nice try, but you can\'t put \'..\' in any of the file names"; -$Lang{Can_t_fork_for_tar_restore} = "Can\'t fork for tar restore"; -$Lang{Can_t_fork_for_zip_restore} = "Can\'t fork for zip restore"; $Lang{Host__doesn_t_exist} = "Host \${EscapeHTML(\$In{hostDest})} doesn\'t exist"; $Lang{You_don_t_have_permission_to_restore_onto_host} = "You don\'t have permission to restore onto host" . " \${EscapeHTML(\$In{hostDest})}"; @@ -727,13 +726,10 @@ $Lang{Backup_requested_on_DHCP__host} = "Backup requested on DHCP \$host (\$In{h $Lang{Backup_requested_on__host_by__User} = "Backup requested on \$host by \$User"; $Lang{Backup_stopped_dequeued_on__host_by__User} = "Backup stopped/dequeued on \$host by \$User"; -$Lang{log_Can_t_fork_for_tar_restore_request_by__User} = "log Can\'t fork for tar restore request by \$User"; $Lang{log_User__User_downloaded_tar_archive_for__host} = "log User \$User downloaded tar archive for \$host," . " backup \$num; files were: " . " \${join(\", \", \@fileListTrim)}"; -$Lang{log_Can_t_fork_for_zip_restore_request_by__User} = "log Can\'t fork for zip restore request by \$User"; - $Lang{log_User__User_downloaded_zip_archive_for__host}= "log User \$User downloaded zip archive for \$host," . " backup \$num; files were: " . 
"\${join(\", \", \@fileListTrim)}"; @@ -828,7 +824,7 @@ $Lang{Host_Inhost} = "Host \$In{host}"; $Lang{checkAll} = <
     Select all - +
    Name Type Mode Backup# Size Mod time
     Tout sélectionner - +
    Nom Type Mode Sauvegarde n° Taille Date modification