From: cbarratt Date: Sun, 15 Apr 2007 07:28:39 +0000 (+0000) Subject: * Added some performance improvements to BackupPC::Xfer::RsyncFileIO X-Git-Tag: v3_1_0beta0~5 X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=commitdiff_plain;h=3f3d4f4adbd990e15969d9cbc5e99e89e613e502 * Added some performance improvements to BackupPC::Xfer::RsyncFileIO for the case of small files with cached checksums. * Added optional support for IO::Dirent which allows inode information to be extracted from the dirent directory structure. This allows BackupPC to order some directory operations by inode, which on some file systems (eg: ext3) can result in a 20-30% performance gain. On other file systems there is no real improvement. This optimization is turned on automatically if IO::Dirent is installed. * Added sorting by column feature to host summary table in CGI interface. Implemented by Jeremy Tietsort. * Added FreeBSD init.d file provided by Gabriel Rossetti. * Applied small patch from Sergey to lib/BackupPC/Xfer/Tar.pm that makes it ignore "socket ignored" error on incrementals. * Applied small patch from Sergey to bin/BackupPC_archiveHost. * Added RsyncdUserName to the config editor. Reported by Vicent Roca Daniel. * configure.pl clears $Conf{ParPath} if it doesn't point to a valid executable. * Added freebsd-backuppc init.d script from Gabriel Rossetti. * Added documentation for BackupPC_tarPCCopy, including use of -P option to tar suggested by Daniel Berteaud. * Config editor now removes white space at start of exec path. Reported by Christoph Iwasjuta. * CgiDateFormatMMDD == 2 gives a YYYY-MM-DD format for CGI dates, suggested by Imre. 
--- diff --git a/ChangeLog b/ChangeLog index 6a9400e..ffcc7ca 100644 --- a/ChangeLog +++ b/ChangeLog @@ -21,6 +21,46 @@ # Version __VERSION__, __RELEASEDATE__ #------------------------------------------------------------------------ +* Added some performance improvements to BackupPC::Xfer::RsyncFileIO + for the case of small files with cached checksums. + +* Added optional support for IO::Dirent which allows inode information + to be extracted from the dirent directory structure. This allows + BackupPC to order some directory operations by inode, which on + some file systems (eg: ext3) can results in a 20-30% performance + gain. On other file systems there is no real improvement. This + optimization is turned on automatically if IO::Dirent is installed. + +* Added sorting by column feature to host summary table in CGI + interface. Implemented by Jeremy Tietsort. + +* Added FreeBSD init.d file provided by Gabriel Rossetti. + +* Applied small patch from Sergey to lib/BackupPC/Xfer/Tar.pm that makes + it ignore "socket ignored" error on incrementals. + +* Applied small patch from Sergey to bin/BackupPC_archiveHost. + +* Added RsyncdUserName to the config editor. Reported by Vicent Roca Daniel. + +* configure.pl clears $Conf{ParPath} if it doesn't point to a valid + executable. + +* Added freebsd-backuppc init.d script from Gabriel Rossetti. + +* Added documentation for BackupPC_tarPCCopy, including use of -P option + to tar suggested by Daniel Berteaud. + +* Config editor now removes white space at start of exec path. + Reported by Christoph Iwasjuta. + +* CgiDateFormatMMDD == 2 gives a YYYY-MM-DD format for CGI dates, + suggested by Imre. + +#------------------------------------------------------------------------ +# Version 3.0.0, 28 Jan 2007 +#------------------------------------------------------------------------ + * BackupPC_sendEmail now correctly sends admin email if backups were skipped because the disk was too full, reported by Dan Pritts. 
diff --git a/bin/BackupPC_archiveHost b/bin/BackupPC_archiveHost index 330abec..8e6c483 100755 --- a/bin/BackupPC_archiveHost +++ b/bin/BackupPC_archiveHost @@ -105,7 +105,9 @@ if ( -x "/bin/csh" ) { exit(1); } my $cmd = "$tarCreate -t -h $host -n $bkupNum -s $share . "; -$cmd .= "| $compPath " if ( $compPath ne "cat" && $compPath ne "" ); +$cmd .= "| $compPath " if ( $compPath ne "cat" + && $compPath ne "/bin/cat" + && $compPath ne "" ); if ( -b $outLoc || -c $outLoc || -f $outLoc ) { # # Output file is a device or a regular file, so don't use split @@ -133,7 +135,7 @@ print("$mesg\n"); # my $ret = system(@shell, $cmd); if ( $ret ) { - print("Executing: @shell -cf $cmd\n"); + print("Executing: @shell $cmd\n"); print("Error: $tarCreate, compress or split failed\n"); exit(1); } diff --git a/bin/BackupPC_dump b/bin/BackupPC_dump index 9235a4c..28f3ef7 100755 --- a/bin/BackupPC_dump +++ b/bin/BackupPC_dump @@ -875,7 +875,7 @@ for my $shareName ( @$ShareNames ) { $stat{xferOK} = 0 if ( $stat{hostError} || $stat{hostAbort} ); if ( !$stat{xferOK} ) { # - # kill off the tranfer program, first nicely then forcefully + # kill off the transfer program, first nicely then forcefully # if ( @xferPid ) { kill($bpc->sigName2num("INT"), @xferPid); diff --git a/bin/BackupPC_nightly b/bin/BackupPC_nightly index 19d1879..22782d1 100755 --- a/bin/BackupPC_nightly +++ b/bin/BackupPC_nightly @@ -64,11 +64,10 @@ use strict; no utf8; use lib "/usr/local/BackupPC/lib"; -use BackupPC::Lib; +use BackupPC::Lib qw( :BPC_DT_ALL ); use BackupPC::FileZIO; use Getopt::Std; -use File::Find; use File::Path; use Data::Dumper; @@ -153,7 +152,7 @@ for my $pool ( qw(pool cpool) ) { $fileLinkMax = 0; $fileCntRename = 0; %FixList = (); - find({wanted => \&GetPoolStats}, "$TopDir/$pool/$dir") + $bpc->find({wanted => \&GetPoolStats}, "$TopDir/$pool/$dir") if ( -d "$TopDir/$pool/$dir" ); my $kb = $blkCnt / 2; my $kbRm = $blkCntRm / 2; @@ -260,7 +259,8 @@ sub doBackupInfoUpdate sub GetPoolStats { - my($inode, 
$nlinks, $nblocks) = (lstat($_))[1, 3, 12]; + my($file, $fullPath) = @_; + my($inode, $nlinks, $nblocks) = (lstat($file))[1, 3, 12]; if ( -d _ ) { $dirCnt++; @@ -279,7 +279,6 @@ sub GetPoolStats # pool files vs removing pool files. (Other aspects of the # design should eliminate race conditions.) # - my $fullPath = $File::Find::name; push(@PendingDelete, { inode => $inode, path => $fullPath diff --git a/bin/BackupPC_tarPCCopy b/bin/BackupPC_tarPCCopy index 6dbf954..b5194d3 100755 --- a/bin/BackupPC_tarPCCopy +++ b/bin/BackupPC_tarPCCopy @@ -6,6 +6,8 @@ # contain hardlinks to the pool directory, which should be copied # before BackupPC_tarPCCopy is run. # +# See the documentation for use. +# # DESCRIPTION # # Usage: BackupPC_tarPCCopy [options] files/directories... diff --git a/conf/BackupPC_stnd.css b/conf/BackupPC_stnd.css index a50bcab..3bd887a 100644 --- a/conf/BackupPC_stnd.css +++ b/conf/BackupPC_stnd.css @@ -1,7 +1,7 @@ /* * BackupPC standard CSS definitions * - * Version 3.0.0beta2, released 31 Oct 2006. + * Version 3.0.0, released 28 Jan 2007. * * See http://backuppc.sourceforge.net. * @@ -95,6 +95,13 @@ a.navbar { background-color:#eeeeee; } +table.sortable a.sortheader { + background-color:#eeeeee; + font-weight: bold; + text-decoration: none; + display: block; +} + .border { font-size:10pt; } diff --git a/conf/config.pl b/conf/config.pl index 9310157..eac455e 100644 --- a/conf/config.pl +++ b/conf/config.pl @@ -29,7 +29,7 @@ # Craig Barratt # # COPYRIGHT -# Copyright (C) 2001-2003 Craig Barratt +# Copyright (C) 2001-2007 Craig Barratt # # See http://backuppc.sourceforge.net. # @@ -103,19 +103,17 @@ $Conf{UmaskMode} = 027; # # Examples: # $Conf{WakeupSchedule} = [22.5]; # once per day at 10:30 pm. -# $Conf{WakeupSchedule} = [1..23]; # every hour except midnight # $Conf{WakeupSchedule} = [2,4,6,8,10,12,14,16,18,20,22]; # every 2 hours # # The default value is every hour except midnight. 
# -# The first entry of $Conf{WakeupSchedule} is when BackupPC_nightly -# is run. No other backups can run while BackupPC_nightly is -# running. You might want to re-arrange the entries in -# $Conf{WakeupSchedule} (they don't have to be ascending) so that -# the first entry is when you want BackupPC_nightly to run -# (eg: when you don't expect a lot of regular backups to run). +# The first entry of $Conf{WakeupSchedule} is when BackupPC_nightly is run. +# You might want to re-arrange the entries in $Conf{WakeupSchedule} +# (they don't have to be ascending) so that the first entry is when +# you want BackupPC_nightly to run (eg: when you don't expect a lot +# of regular backups to run). # -$Conf{WakeupSchedule} = [1..23]; +$Conf{WakeupSchedule} = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]; # # Maximum number of simultaneous backups to run. If there @@ -1897,8 +1895,9 @@ $Conf{CgiUserHomePageCheck} = ''; $Conf{CgiUserUrlCreate} = 'mailto:%s'; # -# Date display format for CGI interface. True for US-style dates (MM/DD) -# and zero for international dates (DD/MM). +# Date display format for CGI interface. A value of 1 uses US-style +# dates (MM/DD), a value of 2 uses full YYYY-MM-DD format, and zero +# for international dates (DD/MM). 
# $Conf{CgiDateFormatMMDD} = 1; @@ -2052,6 +2051,7 @@ $Conf{CgiUserConfigEdit} = { RsyncShareName => 1, RsyncdClientPort => 1, RsyncdPasswd => 1, + RsyncdUserName => 1, RsyncdAuthRequired => 1, RsyncCsumCacheVerifyProb => 1, RsyncArgs => 1, diff --git a/conf/sorttable.js b/conf/sorttable.js new file mode 100644 index 0000000..32197c9 --- /dev/null +++ b/conf/sorttable.js @@ -0,0 +1,188 @@ +// Used with under license grant from http://kryogenix.org/code/browser/sorttable/ +// Credit for this code goes to Stuart Langridge - http://kryogenix.org/contact + +addEvent(window, "load", sortables_init); + +var SORT_COLUMN_INDEX; + +function sortables_init() { + // Find all tables with class sortable and make them sortable + if (!document.getElementsByTagName) return; + tbls = document.getElementsByTagName("table"); + for (ti=0;ti 0) { + var firstRow = table.rows[0]; + } + if (!firstRow) return; + + // We have a first row: assume it's the header, and make its contents clickable links + for (var i=0;i' + + txt+'   '; + } +} + +function ts_getInnerText(el) { + if (typeof el == "string") return el; + if (typeof el == "undefined") { return el }; + if (el.innerText) return el.innerText; //Not needed but it is faster + var str = ""; + + var cs = el.childNodes; + var l = cs.length; + for (var i = 0; i < l; i++) { + switch (cs[i].nodeType) { + case 1: //ELEMENT_NODE + str += ts_getInnerText(cs[i]); + break; + case 3: //TEXT_NODE + str += cs[i].nodeValue; + break; + } + } + return str; +} + +function ts_resortTable(lnk,clid) { + // get the span + var span; + for (var ci=0;ci. 
=head1 Copyright -Copyright (C) 2001-2006 Craig Barratt +Copyright (C) 2001-2007 Craig Barratt =head1 Credits diff --git a/init.d/README b/init.d/README index e08e77b..afa4a61 100644 --- a/init.d/README +++ b/init.d/README @@ -79,6 +79,24 @@ start automatically at boot (at the default run level): rc-update add backuppc default +FreeBSD: +======= + +When configure.pl is run, the script freebsd-backuppc is created. + +Copy this script to /usr/local/etc/rc.d/backuppc and make execuatble. + +Add the following line to /etc/rc.conf to enable BackupPC: + +backuppc_enable=(bool): Set to "NO" by default. + Set it to "YES" to enable BackupPC. + +Example: + + backuppc_enable="YES" + +The script accepts: start, stop, restart, reload, status + Slackware: ========= diff --git a/init.d/src/freebsd-backuppc b/init.d/src/freebsd-backuppc new file mode 100644 index 0000000..34a825a --- /dev/null +++ b/init.d/src/freebsd-backuppc @@ -0,0 +1,75 @@ +#!/bin/sh + +# PROVIDE: backuppc +# REQUIRE: DAEMON +# BEFORE: LOGIN +# KEYWORD: shutdown + +# +# Copy to /usr/local/etc/rc.d/backuppc and make execuatble +# +# Add the following line to /etc/rc.conf to enable BackupPC: +# backuppc_enable=(bool): Set to "NO" by default. +# Set it to "YES" to enable BackupPC. +# +# Example: +# +# backuppc_enable="YES" +# +# It accepts : start, stop, restart, reload, status +# +# Provided by : Gabriel Rossetti +# + +. 
/etc/rc.subr + +name="backuppc" +rcvar=`set_rcvar` +start_cmd="backuppc_start" +restart_cmd="backuppc_restart" +stop_cmd="backuppc_stop" +status_cmd="backuppc_status" +reload_cmd="backuppc_reload" + +load_rc_config $name +eval "${rcvar}=\${${rcvar}:-'NO'}" + +: ${backuppc_enable="NO"} +#backuppc_enable=${backuppc_enable:-"NO"} + +backuppc_start() +{ + su backuppc -c '__INSTALLDIR__/bin/BackupPC -d' + echo "${name} started" +} + +backuppc_restart() +{ + backuppc_stop + sleep 1 + backuppc_start +} + +backuppc_stop() +{ + /usr/bin/pkill -f "__INSTALLDIR__/bin/BackupPC -d" + echo "${name} stopped" +} + +backuppc_status() +{ + if [ "`ps ax | grep "BackupPC -d" | grep perl`" = "" ] ; then + echo "${name} not running" + else + echo "${name} running" + fi +} + +backuppc_reload() +{ + /usr/bin/pkill -1 -f "__INSTALLDIR__/bin/BackupPC -d" + echo "${name} reloaded" +} + +extra_commands="reload status" +run_rc_command "$1" diff --git a/lib/BackupPC/CGI/Browse.pm b/lib/BackupPC/CGI/Browse.pm index d8faed8..6e927f9 100644 --- a/lib/BackupPC/CGI/Browse.pm +++ b/lib/BackupPC/CGI/Browse.pm @@ -80,7 +80,7 @@ sub action my $backupTime = timeStamp2($Backups[$i]{startTime}); my $backupAge = sprintf("%.1f", (time - $Backups[$i]{startTime}) / (24 * 3600)); - my $view = BackupPC::View->new($bpc, $host, \@Backups); + my $view = BackupPC::View->new($bpc, $host, \@Backups, {nlink => 1}); if ( $dir eq "" || $dir eq "." || $dir eq ".." 
) { $attr = $view->dirAttrib($num, "", ""); diff --git a/lib/BackupPC/CGI/DirHistory.pm b/lib/BackupPC/CGI/DirHistory.pm index e13a921..5abcc36 100644 --- a/lib/BackupPC/CGI/DirHistory.pm +++ b/lib/BackupPC/CGI/DirHistory.pm @@ -62,7 +62,7 @@ sub action ErrorExit($Lang->{Empty_host_name}) if ( $host eq "" ); my @Backups = $bpc->BackupInfoRead($host); - my $view = BackupPC::View->new($bpc, $host, \@Backups); + my $view = BackupPC::View->new($bpc, $host, \@Backups, {inode => 1}); my $hist = $view->dirHistory($share, $dir); my($backupNumStr, $backupTimeStr, $fileStr); diff --git a/lib/BackupPC/CGI/EditConfig.pm b/lib/BackupPC/CGI/EditConfig.pm index 2e734db..4811f4e 100644 --- a/lib/BackupPC/CGI/EditConfig.pm +++ b/lib/BackupPC/CGI/EditConfig.pm @@ -1466,6 +1466,8 @@ sub fieldInputParse } else { $$value = decode_utf8($In{"v_zZ_$varName"}); $$value =~ s/\r\n/\n/g; + # remove leading space from exec paths + $$value =~ s/^\s+// if ( $type->{type} eq "execPath" ); } $$value = undef if ( $type->{undefIfEmpty} && $$value eq "" ); } diff --git a/lib/BackupPC/CGI/Lib.pm b/lib/BackupPC/CGI/Lib.pm index b515bb9..8370b46 100644 --- a/lib/BackupPC/CGI/Lib.pm +++ b/lib/BackupPC/CGI/Lib.pm @@ -180,7 +180,10 @@ sub timeStamp2 my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime($_[0] == 0 ? time : $_[0] ); $mon++; - if ( $Conf{CgiDateFormatMMDD} ) { + if ( $Conf{CgiDateFormatMMDD} == 2 ) { + $year += 1900; + return sprintf("%04d-%02d-%02d %02d:%02d", $year, $mon, $mday, $hour, $min); + } elsif ( $Conf{CgiDateFormatMMDD} ) { return sprintf("$mon/$mday %02d:%02d", $hour, $min); } else { return sprintf("$mday/$mon %02d:%02d", $hour, $min); @@ -442,6 +445,7 @@ sub Header $title $Conf{CgiHeaders} +
EOF diff --git a/lib/BackupPC/CGI/Summary.pm b/lib/BackupPC/CGI/Summary.pm index 5c3cacd..9b26188 100644 --- a/lib/BackupPC/CGI/Summary.pm +++ b/lib/BackupPC/CGI/Summary.pm @@ -45,7 +45,7 @@ sub action $strNone, $strGood, $hostCntGood, $hostCntNone); $hostCntGood = $hostCntNone = 0; - GetStatusInfo("hosts"); + GetStatusInfo("hosts info"); my $Privileged = CheckPermission(); foreach my $host ( GetUserHosts(1) ) { @@ -160,6 +160,8 @@ EOF $fullSizeTot = sprintf("%.2f", $fullSizeTot / 1000); $incrSizeTot = sprintf("%.2f", $incrSizeTot / 1000); my $now = timeStamp2(time); + my $DUlastTime = timeStamp2($Info{DUlastValueTime}); + my $DUmaxTime = timeStamp2($Info{DUDailyMaxTime}); my $content = eval ("qq{$Lang->{BackupPC_Summary}}"); Header($Lang->{BackupPC__Server_Summary}, $content); diff --git a/lib/BackupPC/Config/Meta.pm b/lib/BackupPC/Config/Meta.pm index 242a789..1ad7d85 100644 --- a/lib/BackupPC/Config/Meta.pm +++ b/lib/BackupPC/Config/Meta.pm @@ -316,7 +316,7 @@ use vars qw(%ConfigMeta); }, CgiUserHomePageCheck => "string", CgiUserUrlCreate => "string", - CgiDateFormatMMDD => "boolean", + CgiDateFormatMMDD => "integer", CgiNavBarAdminAllHosts => "boolean", CgiSearchBoxEnable => "boolean", CgiNavBarLinks => { @@ -396,6 +396,7 @@ use vars qw(%ConfigMeta); TarClientRestoreCmd => "boolean", RsyncShareName => "boolean", RsyncdClientPort => "boolean", + RsyncdUserName => "boolean", RsyncdPasswd => "boolean", RsyncdAuthRequired => "boolean", RsyncCsumCacheVerifyProb => "boolean", diff --git a/lib/BackupPC/Lang/de.pm b/lib/BackupPC/Lang/de.pm index 5cb73d8..2d74daa 100644 --- a/lib/BackupPC/Lang/de.pm +++ b/lib/BackupPC/Lang/de.pm @@ -137,7 +137,13 @@ $Lang{BackupPC_Summary}=< -Dieser Status wurde am \$now generiert. +
    +
  • Dieser Status wurde am \$now generiert. +
  • Das Pool Filesystem (Backup-Speicherplatz) ist zu \$Info{DUlastValue}% + (\$DUlastTime) voll, das Maximum heute ist \$Info{DUDailyMax}% (\$DUmaxTime) + und das Maximum gestern war \$Info{DUDailyMaxPrev}%. (Hinweis: Sollten ca. 70% überschritten werden, so + ist evtl. bald eine Erweiterung des Backupspeichers erforderlich. Ist weitere Planung nötig?) 

\${h2("Computer mit erfolgreichen Backups")} @@ -150,7 +156,7 @@ Es gibt \$hostCntGood Computer die erfolgreich gesichert wurden, mit insgesamt: (vor Pooling und Komprimierung).

- +
@@ -169,7 +175,7 @@ Es gibt \$hostCntGood Computer die erfolgreich gesichert wurden, mit insgesamt:

Es gibt \$hostCntNone Computer ohne Backups !!!

-

Computer Benutzer #Voll
+
diff --git a/lib/BackupPC/Lang/en.pm b/lib/BackupPC/Lang/en.pm index 7c58322..ba40885 100644 --- a/lib/BackupPC/Lang/en.pm +++ b/lib/BackupPC/Lang/en.pm @@ -125,7 +125,12 @@ $Lang{BackupPC_Summary} = < -This status was generated at \$now. +
    +
  • This status was generated at \$now. +
  • Pool file system was recently at \$Info{DUlastValue}% + (\$DUlastTime), today\'s max is \$Info{DUDailyMax}% (\$DUmaxTime) + and yesterday\'s max was \$Info{DUDailyMaxPrev}%. +

\${h2("Hosts with good Backups")} @@ -138,7 +143,7 @@ There are \$hostCntGood hosts that have been backed up, for a total of: (prior to pooling and compression).

-
Computer Benutzer #Voll
+
@@ -157,7 +162,7 @@ There are \$hostCntGood hosts that have been backed up, for a total of:

There are \$hostCntNone hosts with no backups.

-

Host User #Full
+
diff --git a/lib/BackupPC/Lang/es.pm b/lib/BackupPC/Lang/es.pm index 8ac23a4..67d2788 100644 --- a/lib/BackupPC/Lang/es.pm +++ b/lib/BackupPC/Lang/es.pm @@ -126,7 +126,12 @@ $Lang{BackupPC_Summary}=< -Este status ha sido generado el \$now. +
    +
  • Este status ha sido generado el \$now. +
  • El sistema de archivos estaba recientemente al \$Info{DUlastValue}% + (\$DUlastTime), el máximo de hoy es \$Info{DUDailyMax}% (\$DUmaxTime) + y el máximo de ayer era \$Info{DUDailyMaxPrev}%. +

\${h2("Hosts con Buenas Copias de Seguridad")} @@ -139,7 +144,7 @@ Il y a \$hostCntGood hosts tienen copia de seguridad, de un total de : (antes de agrupar y comprimir).

-
Host User #Full
+
@@ -158,7 +163,7 @@ Il y a \$hostCntGood hosts tienen copia de seguridad, de un total de :

Hay \$hostCntNone hosts sin copias de seguridad.

-

Host Usuario #Completo
+
diff --git a/lib/BackupPC/Lang/fr.pm b/lib/BackupPC/Lang/fr.pm index 38ee9d4..af324a9 100644 --- a/lib/BackupPC/Lang/fr.pm +++ b/lib/BackupPC/Lang/fr.pm @@ -125,7 +125,12 @@ $Lang{BackupPC_Summary}=< -Ce statut a été généré le \$now. +
    +
  • Ce statut a été généré le \$now. +
  • L\'espace de stockage a été récemment rempli à \$Info{DUlastValue}% + (\$DUlastTime), le maximum aujourd\'hui a été de \$Info{DUDailyMax}% (\$DUmaxTime) + et hier le maximum était \$Info{DUDailyMaxPrev}%. +

\${h2("Hôtes avec de bonnes sauvegardes")} @@ -138,7 +143,7 @@ Il y a \$hostCntGood h (précédant la mise en commun et la compression).

-
Host Usuario #Completo
+
@@ -157,7 +162,7 @@ Il y a \$hostCntGood h

Il y a \$hostCntNone hôtes sans sauvegardes.

-

Hôte Utilisateur Nb complètes
+
diff --git a/lib/BackupPC/Lang/it.pm b/lib/BackupPC/Lang/it.pm index 910bf41..6a54294 100644 --- a/lib/BackupPC/Lang/it.pm +++ b/lib/BackupPC/Lang/it.pm @@ -133,7 +133,13 @@ $Lang{BackupPC_Summary} = < -Questo rapporto di stato è stato generato il \$now. +
    +
  • Questo rapporto di stato è stato generato il \$now. +
  • Recentemente il sistema dei file di pool è stato al + \$Info{DUlastValue}% (\$DUlastTime). Il massimo di oggi + è del \$Info{DUDailyMax}% (\$DUmaxTime), mentre quello + di ieri era del \$Info{DUDailyMaxPrev}%. +

\${h2("Host con backup validi")} @@ -146,7 +152,7 @@ Ci sono \$hostCntGood host sottoposti a backup per un totale di: (prima del processo di pooling e compressione).

-
Hôte Utilisateur Nb complètes
+
@@ -165,7 +171,7 @@ Ci sono \$hostCntGood host sottoposti a backup per un totale di:

Ci sono \$hostCntNone host senza alcun backup.

-

Host Utente Completi
+
diff --git a/lib/BackupPC/Lang/nl.pm b/lib/BackupPC/Lang/nl.pm index 59e36cb..e690c8b 100644 --- a/lib/BackupPC/Lang/nl.pm +++ b/lib/BackupPC/Lang/nl.pm @@ -125,7 +125,12 @@ $Lang{BackupPC_Summary}=< -Dit overzicht dateert van \$now. +
    +
  • Dit overzicht dateert van \$now. +
  • Het backup filesystem werd recentelijk aangevuld voor \$Info{DUlastValue}% + op (\$DUlastTime), het maximum van vandaag is \$Info{DUDailyMax}% (\$DUmaxTime) + en het maximum van gisteren was \$Info{DUDailyMaxPrev}%. +

\${h2("Machine(s) met geslaagde backups")} @@ -138,7 +143,7 @@ Er zijn \$hostCntGood hosts gebackupt, wat een totaal geeft van: (voor samenvoegen).

-
Host Utente Completi
+
@@ -157,7 +162,7 @@ Er zijn \$hostCntGood hosts gebackupt, wat een totaal geeft van:

Er zijn \$hostCntNone hosts zonder backup.

-

Machine Gebruiker Aantal Voll.
+
diff --git a/lib/BackupPC/Lang/pt_br.pm b/lib/BackupPC/Lang/pt_br.pm index 94ea463..22e95a7 100644 --- a/lib/BackupPC/Lang/pt_br.pm +++ b/lib/BackupPC/Lang/pt_br.pm @@ -130,7 +130,12 @@ $Lang{BackupPC_Summary}=< -Este status foi generado em \$now. +
    +
  • Este status foi gerado em \$now. +
  • O sistema de arquivos estava recentemente em \$Info{DUlastValue}% + (\$DUlastTime), o máximo de hoje é \$Info{DUDailyMax}% (\$DUmaxTime) + e o máximo de ontem foi \$Info{DUDailyMaxPrev}%. +

\${h2("Hosts com Backups Completos")} @@ -143,7 +148,7 @@ Existem \$hostCntGood hosts com backup, de um total de : (antes de agrupar e comprimir).

-
Machine Gebruiker Aantal Voll.
+
@@ -162,7 +167,7 @@ Existem \$hostCntGood hosts com backup, de um total de :

Existem \$hostCntNone hosts sem backups.

-

Host Usuario #Completo
+
diff --git a/lib/BackupPC/Lib.pm b/lib/BackupPC/Lib.pm index 0c4b69b..44de3c5 100644 --- a/lib/BackupPC/Lib.pm +++ b/lib/BackupPC/Lib.pm @@ -41,9 +41,8 @@ use strict; use vars qw(%Conf %Lang); use BackupPC::Storage; -use Fcntl qw/:flock/; +use Fcntl ':mode'; use Carp; -use DirHandle (); use File::Path; use File::Compare; use Socket; @@ -51,6 +50,42 @@ use Cwd; use Digest::MD5; use Config; +use vars qw( $IODirentOk ); +use vars qw(@ISA @EXPORT @EXPORT_OK %EXPORT_TAGS); + +require Exporter; +require DynaLoader; + +@ISA = qw(Exporter DynaLoader); +@EXPORT_OK = qw( BPC_DT_UNKNOWN + BPC_DT_FIFO + BPC_DT_CHR + BPC_DT_DIR + BPC_DT_BLK + BPC_DT_REG + BPC_DT_LNK + BPC_DT_SOCK + ); +@EXPORT = qw( ); +%EXPORT_TAGS = ('BPC_DT_ALL' => [@EXPORT, @EXPORT_OK]); + +BEGIN { + eval "use IO::Dirent qw( readdirent DT_DIR );"; + $IODirentOk = 1 if ( !$@ ); +}; + +# +# The need to match the constants in IO::Dirent +# +use constant BPC_DT_UNKNOWN => 0; +use constant BPC_DT_FIFO => 1; ## named pipe (fifo) +use constant BPC_DT_CHR => 2; ## character special +use constant BPC_DT_DIR => 4; ## directory +use constant BPC_DT_BLK => 6; ## block special +use constant BPC_DT_REG => 8; ## regular +use constant BPC_DT_LNK => 10; ## symbolic link +use constant BPC_DT_SOCK => 12; ## socket + sub new { my $class = shift; @@ -404,11 +439,101 @@ sub HostsMTime return $bpc->{storage}->HostsMTime(); } +# +# Read a directory and return the entries in sorted inode order. +# This relies on the IO::Dirent module being installed. If not, +# the inode data is empty and the default directory order is +# returned. +# +# The returned data is a list of hashes with entries {name, type, inode, nlink}. +# The returned data includes "." and "..". +# +# $need is a hash of file attributes we need: type, inode, or nlink. +# If set, these parameters are added to the returned hash. +# +# If IO::Dirent is successful if will get type and inode for free. +# Otherwise, a stat is done on each file, which is more expensive. 
+# +sub dirRead +{ + my($bpc, $path, $need) = @_; + my(@entries, $addInode); + + return if ( !opendir(my $fh, $path) ); + if ( $IODirentOk ) { + @entries = sort({ $a->{inode} <=> $b->{inode} } readdirent($fh)); + map { $_->{type} = 0 + $_->{type} } @entries; # make type numeric + } else { + @entries = map { { name => $_} } readdir($fh); + } + closedir($fh); + if ( defined($need) ) { + for ( my $i = 0 ; $i < @entries ; $i++ ) { + next if ( (!$need->{inode} || defined($entries[$i]{inode})) + && (!$need->{type} || defined($entries[$i]{type})) + && (!$need->{nlink} || defined($entries[$i]{nlink})) ); + my @s = stat("$path/$entries[$i]{name}"); + $entries[$i]{nlink} = $s[3] if ( $need->{nlink} ); + if ( $need->{inode} && !defined($entries[$i]{inode}) ) { + $addInode = 1; + $entries[$i]{inode} = $s[1]; + } + if ( $need->{type} && !defined($entries[$i]{type}) ) { + my $mode = S_IFMT($s[2]); + $entries[$i]{type} = BPC_DT_FIFO if ( S_ISFIFO($mode) ); + $entries[$i]{type} = BPC_DT_CHR if ( S_ISCHR($mode) ); + $entries[$i]{type} = BPC_DT_DIR if ( S_ISDIR($mode) ); + $entries[$i]{type} = BPC_DT_BLK if ( S_ISBLK($mode) ); + $entries[$i]{type} = BPC_DT_REG if ( S_ISREG($mode) ); + $entries[$i]{type} = BPC_DT_LNK if ( S_ISLNK($mode) ); + $entries[$i]{type} = BPC_DT_SOCK if ( S_ISSOCK($mode) ); + } + } + } + # + # Sort the entries if inodes were added (the IO::Dirent case already + # sorted above) + # + @entries = sort({ $a->{inode} <=> $b->{inode} } @entries) if ( $addInode ); + return \@entries; +} + +# +# Same as dirRead, but only returns the names (which will be sorted in +# inode order if IO::Dirent is installed) +# +sub dirReadNames +{ + my($bpc, $path) = @_; + + my $entries = $bpc->dirRead($path); + return if ( !defined($entries) ); + my @names = map { $_->{name} } @$entries; + return \@names; +} + +sub find +{ + my($bpc, $param, $dir, $dontDoCwd) = @_; + + return if ( !chdir($dir) ); + my $entries = $bpc->dirRead(".", {inode => 1, type => 1}); + #print Dumper($entries); + 
foreach my $f ( @$entries ) { + next if ( $f->{name} eq ".." || $f->{name} eq "." && $dontDoCwd ); + $param->{wanted}($f->{name}, "$dir/$f->{name}"); + next if ( $f->{type} != BPC_DT_DIR || $f->{name} eq "." ); + chdir($f->{name}); + $bpc->find($param, "$dir/$f->{name}", 1); + return if ( !chdir("..") ); + } +} + # # Stripped down from File::Path. In particular we don't print # many warnings and we try three times to delete each directory # and file -- for some reason the original File::Path rmtree -# didn't always completely remove a directory tree on the NetApp. +# didn't always completely remove a directory tree on a NetApp. # # Warning: this routine changes the cwd. # @@ -433,13 +558,11 @@ sub RmTreeQuiet # if ( !unlink($root) ) { if ( -d $root ) { - my $d = DirHandle->new($root); + my $d = $bpc->dirReadNames($root); if ( !defined($d) ) { print(STDERR "Can't read $pwd/$root: $!\n"); } else { - @files = $d->read; - $d->close; - @files = grep $_!~/^\.{1,2}$/, @files; + @files = grep $_ !~ /^\.{1,2}$/, @$d; $bpc->RmTreeQuiet("$pwd/$root", \@files); chdir($pwd); rmdir($root) || rmdir($root); @@ -490,10 +613,8 @@ sub RmTreeTrashEmpty $cwd = $1 if ( $cwd =~ /(.*)/ ); return if ( !-d $trashDir ); - my $d = DirHandle->new($trashDir) or carp "Can't read $trashDir: $!"; - @files = $d->read; - $d->close; - @files = grep $_!~/^\.{1,2}$/, @files; + my $d = $bpc->dirReadNames($trashDir) or carp "Can't read $trashDir: $!"; + @files = grep $_ !~ /^\.{1,2}$/, @$d; return 0 if ( !@files ); $bpc->RmTreeQuiet($trashDir, \@files); foreach my $f ( @files ) { diff --git a/lib/BackupPC/View.pm b/lib/BackupPC/View.pm index 147c425..cf03983 100644 --- a/lib/BackupPC/View.pm +++ b/lib/BackupPC/View.pm @@ -49,16 +49,20 @@ use Data::Dumper; sub new { - my($class, $bpc, $host, $backups) = @_; + my($class, $bpc, $host, $backups, $options) = @_; my $m = bless { - bpc => $bpc, # BackupPC::Lib object - host => $host, # host name - backups => $backups, # all backups for this host - num => -1, # 
backup number - idx => -1, # index into backups for backup - # we are viewing - dirPath => undef, # path to current directory - dirAttr => undef, # attributes of current directory + bpc => $bpc, # BackupPC::Lib object + host => $host, # host name + backups => $backups, # all backups for this host + num => -1, # backup number + idx => -1, # index into backups for backup + # we are viewing + dirPath => undef, # path to current directory + dirAttr => undef, # attributes of current directory + dirOpts => $options, # $options is a hash of file attributes we need: + # type, inode, or nlink. If set, these parameters + # are added to the returned hash. + # See BackupPC::Lib::dirRead(). }, $class; for ( my $i = 0 ; $i < @{$m->{backups}} ; $i++ ) { next if ( defined($m->{backups}[$i]{level}) ); @@ -119,7 +123,9 @@ sub dirCache } $path .= $sharePathM; #print(STDERR "Opening $path (share=$share)\n"); - if ( !opendir(DIR, $path) ) { + + my $dirInfo = $m->{bpc}->dirRead($path, $m->{dirOpts}); + if ( !defined($dirInfo) ) { if ( $i == $m->{idx} ) { # # Oops, directory doesn't exist. @@ -129,8 +135,6 @@ sub dirCache } next; } - my @dir = readdir(DIR); - closedir(DIR); my $attr; if ( $mangle ) { $attr = BackupPC::Attrib->new({ compress => $compress }); @@ -139,8 +143,8 @@ sub dirCache $attr = undef; } } - foreach my $file ( @dir ) { - $file = $1 if ( $file =~ /(.*)/s ); + foreach my $entry ( @$dirInfo ) { + my $file = $1 if ( $entry->{name} =~ /(.*)/s ); my $fileUM = $file; $fileUM = $m->{bpc}->fileNameUnmangle($fileUM) if ( $mangle ); #print(STDERR "Doing $fileUM\n"); @@ -152,14 +156,13 @@ sub dirCache || $file eq "." || $file eq "backupInfo" || $mangle && $file eq "attrib" ); - # - # skip directories in earlier backups (each backup always - # has the complete directory tree). 
- # - my @s = stat("$path/$file"); - next if ( $i < $m->{idx} && -d _ ); if ( defined($attr) && defined(my $a = $attr->get($fileUM)) ) { $m->{files}{$fileUM} = $a; + # + # skip directories in earlier backups (each backup always + # has the complete directory tree). + # + next if ( $i < $m->{idx} && $a->{type} == BPC_FTYPE_DIR ); $attr->set($fileUM, undef); } else { # @@ -167,6 +170,8 @@ sub dirCache # is on. We have to stat the file and read compressed files # to determine their size. # + my @s = stat("$path/$file"); + next if ( $i < $m->{idx} && -d _ ); $m->{files}{$fileUM} = { type => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE, mode => $s[2], @@ -199,8 +204,10 @@ sub dirCache ($m->{files}{$fileUM}{fullPath} = "$path/$file") =~ s{//+}{/}g; $m->{files}{$fileUM}{backupNum} = $backupNum; $m->{files}{$fileUM}{compress} = $compress; - $m->{files}{$fileUM}{nlink} = $s[3]; - $m->{files}{$fileUM}{inode} = $s[1]; + $m->{files}{$fileUM}{nlink} = $entry->{nlink} + if ( $m->{dirOpts}{nlink} ); + $m->{files}{$fileUM}{inode} = $entry->{inode} + if ( $m->{dirOpts}{inode} ); } # # Also include deleted files @@ -384,14 +391,14 @@ sub dirHistory } $path .= $sharePathM; #print(STDERR "Opening $path (share=$share)\n"); - if ( !opendir(DIR, $path) ) { + + my $dirInfo = $m->{bpc}->dirRead($path, $m->{dirOpts}); + if ( !defined($dirInfo) ) { # # Oops, directory doesn't exist. 
# next; } - my @dir = readdir(DIR); - closedir(DIR); my $attr; if ( $mangle ) { $attr = BackupPC::Attrib->new({ compress => $compress }); @@ -400,8 +407,8 @@ sub dirHistory $attr = undef; } } - foreach my $file ( @dir ) { - $file = $1 if ( $file =~ /(.*)/s ); + foreach my $entry ( @$dirInfo ) { + my $file = $1 if ( $entry->{name} =~ /(.*)/s ); my $fileUM = $file; $fileUM = $m->{bpc}->fileNameUnmangle($fileUM) if ( $mangle ); #print(STDERR "Doing $fileUM\n"); @@ -454,8 +461,10 @@ sub dirHistory ($files->{$fileUM}[$i]{fullPath} = "$path/$file") =~ s{//+}{/}g; $files->{$fileUM}[$i]{backupNum} = $backupNum; $files->{$fileUM}[$i]{compress} = $compress; - $files->{$fileUM}[$i]{nlink} = $s[3]; - $files->{$fileUM}[$i]{inode} = $s[1]; + $files->{$fileUM}[$i]{nlink} = $entry->{nlink} + if ( $m->{dirOpts}{nlink} ); + $files->{$fileUM}[$i]{inode} = $entry->{inode} + if ( $m->{dirOpts}{inode} ); } # diff --git a/lib/BackupPC/Xfer/RsyncDigest.pm b/lib/BackupPC/Xfer/RsyncDigest.pm index 584ad41..3af92f7 100644 --- a/lib/BackupPC/Xfer/RsyncDigest.pm +++ b/lib/BackupPC/Xfer/RsyncDigest.pm @@ -266,10 +266,10 @@ sub digestStart if ( $fileSize > 0 && $compress && $doCache >= 0 ) { open(my $fh, "<", $fileName) || return -2; binmode($fh); - return -3 if ( read($fh, $data, 1) != 1 ); + return -3 if ( sysread($fh, $data, 4096) < 1 ); my $ret; - if ( ($data eq chr(0x78) || $data eq chr(0xd6)) && $doCache > 0 + if ( (vec($data, 0, 8) == 0x78 || vec($data, 0, 8) == 0xd6) && $doCache > 0 && $checksumSeed == RSYNC_CSUMSEED_CACHE ) { # # RSYNC_CSUMSEED_CACHE (32761) is the magic number that @@ -298,31 +298,44 @@ sub digestStart binmode($fh); return -5 if ( read($fh, $data, 1) != 1 ); } - if ( $ret >= 0 && $data eq chr(0xd7) ) { + if ( $ret >= 0 && vec($data, 0, 8) == 0xd7 ) { # # Looks like this file has cached checksums # Read the last 48 bytes: that's 2 file MD4s (32 bytes) # plus 4 words of meta data # - return -6 if ( !defined(seek($fh, -48, 2)) ); - return -7 if ( read($fh, $data, 48) != 
48 ); + my $cacheInfo; + if ( length($data) >= 4096 ) { + return -6 if ( !defined(sysseek($fh, -4096, 2)) ); + return -7 if ( sysread($fh, $data, 4096) != 4096 ); + } + $cacheInfo = substr($data, -48); ($dg->{md4DigestOld}, $dg->{md4Digest}, $dg->{blockSize}, $dg->{checksumSeed}, $dg->{nBlocks}, - $dg->{magic}) = unpack("a16 a16 V V V V", $data); + $dg->{magic}) = unpack("a16 a16 V V V V", $cacheInfo); if ( $dg->{magic} == 0x5fe3c289 && $dg->{checksumSeed} == $checksumSeed && ($blockSize == 0 || $dg->{blockSize} == $blockSize) ) { $dg->{fh} = $fh; $dg->{cached} = 1; - # - # position the file at the start of the rsync block checksums - # (4 (adler) + 16 (md4) bytes each) - # - return -8 - if ( !defined(seek($fh, -$dg->{nBlocks}*20 - 48, 2)) ); + if ( length($data) >= $dg->{nBlocks} * 20 + 48 ) { + # + # We have all the data already - just remember it + # + $dg->{digestData} = substr($data, + length($data) - $dg->{nBlocks} * 20 - 48, + $dg->{nBlocks} * 20); + } else { + # + # position the file at the start of the rsync block checksums + # (4 (adler) + 16 (md4) bytes each) + # + return -8 + if ( !defined(sysseek($fh, -$dg->{nBlocks} * 20 - 48, 2)) ); + } } else { # # cached checksums are not valid, so we close the @@ -365,7 +378,12 @@ sub digestGet if ( $dg->{cached} ) { my $thisNum = $num; $thisNum = $dg->{nBlocks} if ( $thisNum > $dg->{nBlocks} ); - read($dg->{fh}, $fileData, 20 * $thisNum); + if ( defined($dg->{digestData}) ) { + $fileData = substr($dg->{digestData}, 0, 20 * $thisNum); + $dg->{digestData} = substr($dg->{digestData}, 20 * $thisNum); + } else { + sysread($dg->{fh}, $fileData, 20 * $thisNum); + } $dg->{nBlocks} -= $thisNum; if ( $thisNum < $num && !$noPad) { # diff --git a/lib/BackupPC/Xfer/Tar.pm b/lib/BackupPC/Xfer/Tar.pm index c34b019..60bfd36 100644 --- a/lib/BackupPC/Xfer/Tar.pm +++ b/lib/BackupPC/Xfer/Tar.pm @@ -244,7 +244,7 @@ sub readOutput # # Ignore annoying log message on incremental for tar 1.15.x # - if ( !/: file is unchanged; not 
dumped$/ ) { + if ( !/: file is unchanged; not dumped$/ && !/: socket ignored$/ ) { $t->{XferLOG}->write(\"$_\n") if ( $t->{logLevel} >= 0 ); $t->{xferErrCnt}++; } diff --git a/makeDist b/makeDist index c2eac74..ad21c6f 100755 --- a/makeDist +++ b/makeDist @@ -20,7 +20,7 @@ # Craig Barratt # # COPYRIGHT -# Copyright (C) 2001-2006 Craig Barratt +# Copyright (C) 2001-2007 Craig Barratt # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -53,8 +53,8 @@ die("BackupPC::Lib->new failed\n") umask(0022); -my $Version = "3.0.0"; -my $ReleaseDate = "28 Jan 2007"; +my $Version = "3.1.0"; +my $ReleaseDate = "15 Apr 2007"; my $DistDir = "dist/BackupPC-$Version"; my @PerlSrc = qw( @@ -223,8 +223,10 @@ foreach my $file ( (@PerlSrc, conf/hosts conf/BackupPC_stnd.css conf/BackupPC_stnd_orig.css + conf/sorttable.js init.d/README init.d/src/debian-backuppc + init.d/src/freebsd-backuppc init.d/src/gentoo-backuppc init.d/src/gentoo-backuppc.conf init.d/src/linux-backuppc
Host Usuario #Completo