X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=blobdiff_plain;f=bin%2FBackupPC;h=d9b58fb2f6f6a1a885628d9d0cf78396fae147e5;hp=ba6cde9d2f7b472ebe9c9623558a72a19334f986;hb=9cf3998c4ef71332dea96ff3115daf8b9f722acb;hpb=d6dd48e6594d937dea49e3dd1c5ddaba6719df0d diff --git a/bin/BackupPC b/bin/BackupPC index ba6cde9..d9b58fb 100755 --- a/bin/BackupPC +++ b/bin/BackupPC @@ -1,4 +1,4 @@ -#!/bin/perl +#!/usr/bin/perl #============================================================= -*-perl-*- # # BackupPC: Main program for PC backups. @@ -6,7 +6,7 @@ # DESCRIPTION # # BackupPC reads the configuration and status information from -# $TopDir/conf. It then runs and manages all the backup activity. +# $ConfDir/conf. It then runs and manages all the backup activity. # # As specified by $Conf{WakeupSchedule}, BackupPC wakes up periodically # to queue backups on all the PCs. This is a four step process: @@ -29,7 +29,7 @@ # Craig Barratt # # COPYRIGHT -# Copyright (C) 2001-2003 Craig Barratt +# Copyright (C) 2001-2009 Craig Barratt # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -47,7 +47,7 @@ # #======================================================================== # -# Version 2.1.0_CVS, released 13 Mar 2004. +# Version 3.2.0beta0, released 5 April 2009. # # See http://backuppc.sourceforge.net. # @@ -59,6 +59,7 @@ use vars qw(%Status %Info $Hosts); use lib "/usr/local/BackupPC/lib"; use BackupPC::Lib; use BackupPC::FileZIO; +use Encode qw/decode_utf8/; use File::Path; use Data::Dumper; @@ -66,6 +67,7 @@ use Getopt::Std; use Socket; use Carp; use Digest::MD5; +use POSIX qw(setsid); ########################################################################### # Handle command line options @@ -86,6 +88,7 @@ if ( !getopts("d", \%opts) || @ARGV != 0 ) { die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) ); my $TopDir = $bpc->TopDir(); my $BinDir = $bpc->BinDir(); +my $LogDir = $bpc->LogDir(); my %Conf = $bpc->Conf(); # @@ -110,10 +113,14 @@ if ( $Conf{BackupPCUserVerify} # # Read old status # -if ( -f "$TopDir/log/status.pl" && !(my $ret = do "$TopDir/log/status.pl") ) { - die "couldn't parse $TopDir/log/status.pl: $@" if $@; - die "couldn't do $TopDir/log/status.pl: $!" unless defined $ret; - die "couldn't run $TopDir/log/status.pl"; +if ( -f "$LogDir/status.pl" && !(my $ret = do "$LogDir/status.pl") ) { + if ( $@ ) { + print STDERR "couldn't parse $LogDir/status.pl: $@"; + } elsif ( !defined($ret) ) { + print STDERR "couldn't do $LogDir/status.pl: $!"; + } else { + print STDERR "couldn't run $LogDir/status.pl"; + } } # @@ -173,6 +180,7 @@ my $SigName = ""; # Misc variables # my($RunNightlyWhenIdle, $FirstWakeup, $CmdJob, $ServerInetPort); +my($BackupPCNightlyJobs, $BackupPCNightlyLock); # # Complete the rest of the initialization @@ -240,14 +248,16 @@ sub Main_Initialize umask($Conf{UmaskMode}); # - # Check for another running process, check that PASSWD is set and - # verify executables are configured correctly. + # Check for another running process, verify executables are configured + # correctly and make sure $TopDir is on a file system that supports + # hardlinks. 
# if ( $Info{pid} ne "" && kill(0, $Info{pid}) ) { print(STDERR $bpc->timeStamp, "Another BackupPC is running (pid $Info{pid}); quitting...\n"); exit(1); } + foreach my $progName ( qw(SmbClientPath NmbLookupPath PingPath DfPath SendmailPath SshPath) ) { next if ( $Conf{$progName} eq "" || -x $Conf{$progName} ); @@ -257,12 +267,35 @@ sub Main_Initialize exit(1); } + if ( !$bpc->HardlinkTest("$TopDir/pc", "$TopDir/cpool") ) { + print(STDERR $bpc->timeStamp, "Can't create a test hardlink between a file" + . " in $TopDir/pc and $TopDir/cpool. Either these are different" + . " file systems, or this file system doesn't support hardlinks," + . " or these directories don't exist, or there is a permissions" + . " problem, or the file system is out of inodes or full. Use" + . " df, df -i, and ls -ld to check each of these possibilities." + . " Quitting...\n"); + exit(1); + } + if ( $opts{d} ) { # - # daemonize by forking + # daemonize by forking; more robust method per: + # http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=301057 # - defined(my $pid = fork) or die "Can't fork: $!"; + my $pid; + defined($pid = fork) or die("Can't fork: $!"); + exit if ( $pid ); # parent exits + + POSIX::setsid(); + defined($pid = fork) or die("Can't fork: $!"); exit if $pid; # parent exits + + chdir ("/") or die("Cannot chdir to /: $!\n"); + close(STDIN); + open(STDIN , ">/dev/null") or die("Cannot open /dev/null as stdin\n"); + # STDOUT and STDERR are handled in LogFileOpen() right below, + # otherwise we would have to reopen them too. } # @@ -330,9 +363,11 @@ sub Main_Initialize # Write out our initial status and save our PID # StatusWrite(); - if ( open(PID, ">", "$TopDir/log/BackupPC.pid") ) { + unlink("$LogDir/BackupPC.pid"); + if ( open(PID, ">", "$LogDir/BackupPC.pid") ) { print(PID $$); close(PID); + chmod(0444, "$LogDir/BackupPC.pid"); } # @@ -373,15 +408,84 @@ sub Main_TryToRun_nightly }); $CmdQueueOn{$bpc->trashJob} = 1; } - if ( keys(%Jobs) == $trashCleanRunning && $RunNightlyWhenIdle == 1 ) { - push(@CmdQueue, { - host => $bpc->adminJob, - user => "BackupPC", - reqTime => time, - cmd => ["$BinDir/BackupPC_nightly"], - }); - $CmdQueueOn{$bpc->adminJob} = 1; - $RunNightlyWhenIdle = 2; + if ( $RunNightlyWhenIdle == 1 ) { + # + # Queue multiple nightly jobs based on the configuration + # + $Conf{MaxBackupPCNightlyJobs} = 1 + if ( $Conf{MaxBackupPCNightlyJobs} <= 0 ); + $Conf{BackupPCNightlyPeriod} = 1 + if ( $Conf{BackupPCNightlyPeriod} <= 0 ); + # + # Decide what subset of the 16 top-level directories 0..9a..f + # we run BackupPC_nightly on, based on $Conf{BackupPCNightlyPeriod}. + # If $Conf{BackupPCNightlyPeriod} == 1 then we run 0..15 every + # time. If $Conf{BackupPCNightlyPeriod} == 2 then we run + # 0..7 one night and 89a-f the next night. And so on. + # + # $Info{NightlyPhase} counts which night, from 0 to + # $Conf{BackupPCNightlyPeriod} - 1. + # + my $start = int($Info{NightlyPhase} * 16 + / $Conf{BackupPCNightlyPeriod}); + my $end = int(($Info{NightlyPhase} + 1) * 16 + / $Conf{BackupPCNightlyPeriod}); + $end = $start + 1 if ( $end <= $start ); + $Info{NightlyPhase}++; + $Info{NightlyPhase} = 0 if ( $end >= 16 ); + + # + # Zero out the data we expect to get from BackupPC_nightly. 
+ # + for my $p ( qw(pool cpool) ) { + for ( my $i = $start ; $i < $end ; $i++ ) { + $Info{pool}{$p}[$i]{FileCnt} = 0; + $Info{pool}{$p}[$i]{DirCnt} = 0; + $Info{pool}{$p}[$i]{Kb} = 0; + $Info{pool}{$p}[$i]{Kb2} = 0; + $Info{pool}{$p}[$i]{KbRm} = 0; + $Info{pool}{$p}[$i]{FileCntRm} = 0; + $Info{pool}{$p}[$i]{FileCntRep} = 0; + $Info{pool}{$p}[$i]{FileRepMax} = 0; + $Info{pool}{$p}[$i]{FileCntRename} = 0; + $Info{pool}{$p}[$i]{FileLinkMax} = 0; + $Info{pool}{$p}[$i]{Time} = 0; + } + } + print(LOG $bpc->timeStamp, + sprintf("Running %d BackupPC_nightly jobs from %d..%d" + . " (out of 0..15)\n", + $Conf{MaxBackupPCNightlyJobs}, $start, $end - 1)); + + # + # Now queue the $Conf{MaxBackupPCNightlyJobs} jobs. + # The granularity on start and end is now 0..255. + # + $start *= 16; + $end *= 16; + my $start0 = $start; + for ( my $i = 0 ; $i < $Conf{MaxBackupPCNightlyJobs} ; $i++ ) { + # + # The first nightly job gets the -m option (does email, log aging). + # All jobs get the start and end options from 0..255 telling + # them which parts of the pool to traverse. + # + my $cmd = ["$BinDir/BackupPC_nightly"]; + push(@$cmd, "-m") if ( $i == 0 ); + push(@$cmd, $start); + $start = $start0 + int(($end - $start0) + * ($i + 1) / $Conf{MaxBackupPCNightlyJobs}); + push(@$cmd, $start - 1); + my $job = $bpc->adminJob($i); + unshift(@CmdQueue, { + host => $job, + user => "BackupPC", + reqTime => time, + cmd => $cmd, + }); + $CmdQueueOn{$job} = 1; + } + $RunNightlyWhenIdle = 2; } } @@ -397,7 +501,11 @@ sub Main_TryToRun_nightly sub Main_TryToRun_CmdQueue { my($req, $host); - if ( $CmdJob eq "" && @CmdQueue > 0 && $RunNightlyWhenIdle != 1 ) { + + while ( $CmdJob eq "" && @CmdQueue > 0 && $RunNightlyWhenIdle != 1 + || @CmdQueue > 0 && $RunNightlyWhenIdle == 2 + && $bpc->isAdminJob($CmdQueue[0]->{host}) + ) { local(*FH); $req = pop(@CmdQueue); @@ -425,6 +533,8 @@ sub Main_TryToRun_CmdQueue } if ( !$pid ) { setpgrp 0,0; + $ENV{BPC_REQUSER} = $req->{user}; + POSIX::nice($Conf{CmdQueueNice}) if ( $Conf{CmdQueueNice} ); exec(@$cmd); print(LOG $bpc->timeStamp, "can't exec @$cmd for $host\n"); exit(0); @@ -435,8 +545,9 @@ sub Main_TryToRun_CmdQueue vec($FDread, $Jobs{$host}{fn}, 1) = 1; $Jobs{$host}{startTime} = time; $Jobs{$host}{reqTime} = $req->{reqTime}; - $cmd = join(" ", @$cmd); + $cmd = $bpc->execCmd2ShellCmd(@$cmd); $Jobs{$host}{cmd} = $cmd; + $Jobs{$host}{user} = $req->{user}; $Jobs{$host}{type} = $Status{$host}{type}; $Status{$host}{state} = "Status_link_running"; $Status{$host}{activeJob} = 1; @@ -444,6 +555,10 @@ sub Main_TryToRun_CmdQueue $CmdJob = $host if ( $host ne $bpc->trashJob ); $cmd =~ s/$BinDir\///g; print(LOG $bpc->timeStamp, "Running $cmd (pid=$pid)\n"); + if ( $cmd =~ /^BackupPC_nightly\s/ ) { + $BackupPCNightlyJobs++; + $BackupPCNightlyLock++; + } } } @@ -461,14 +576,77 @@ sub Main_TryToRun_CmdQueue sub Main_TryToRun_Bg_or_User_Queue { my($req, $host); - while ( $RunNightlyWhenIdle == 0 ) { + my(@deferUserQueue, @deferBgQueue); + my $du; + + if ( time - $Info{DUlastValueTime} >= 600 ) { + # + # Update our notion of disk usage no more than + # once every 10 minutes + # + $du = $bpc->CheckFileSystemUsage($TopDir); + $Info{DUlastValue} = $du; + $Info{DUlastValueTime} = time; + } else { + # + # if we recently checked it then just use the old value + # + $du = $Info{DUlastValue}; + } + if ( $Info{DUDailyMaxReset} ) { + $Info{DUDailyMaxStartTime} = time; + $Info{DUDailyMaxReset} = 0; + $Info{DUDailyMax} = 0; + } + if ( $du > $Info{DUDailyMax} ) { + $Info{DUDailyMax} = $du; + $Info{DUDailyMaxTime} = 
time; + } + if ( $du > $Conf{DfMaxUsagePct} ) { + my @bgQueue = @BgQueue; + my $nSkip = 0; + + # + # When the disk is too full, only run backups that will + # do expires, not regular backups + # + @BgQueue = (); + foreach $req ( @bgQueue ) { + if ( $req->{dumpExpire} ) { + unshift(@BgQueue, $req); + } else { + $BgQueueOn{$req->{host}} = 0; + $nSkip++; + } + } + if ( $nSkip ) { + print(LOG $bpc->timeStamp, + "Disk too full ($du%); skipped $nSkip hosts\n"); + $Info{DUDailySkipHostCnt} += $nSkip; + } + } + + # + # Run background jobs anytime. Previously they were locked out + # when BackupPC_nightly was running or pending with this + # condition on the while loop: + # + # while ( $RunNightlyWhenIdle == 0 ) + # + while ( 1 ) { local(*FH); - my(@args, @deferUserQueue, @deferBgQueue, $progName, $type); + my(@args, $progName, $type); my $nJobs = keys(%Jobs); # # CmdJob and trashClean don't count towards MaxBackups / MaxUserBackups # - $nJobs-- if ( $CmdJob ne "" ); + if ( $CmdJob ne "" ) { + if ( $BackupPCNightlyJobs ) { + $nJobs -= $BackupPCNightlyJobs; + } else { + $nJobs--; + } + } $nJobs-- if ( defined($Jobs{$bpc->trashJob} ) ); if ( $nJobs < $Conf{MaxBackups} + $Conf{MaxUserBackups} && @UserQueue > 0 ) { @@ -477,60 +655,26 @@ sub Main_TryToRun_Bg_or_User_Queue push(@deferUserQueue, $req); next; } - push(@args, $req->{doFull} ? "-f" : "-i") - if (( !$req->{restore} ) && ( !$req->{archive} )); $UserQueueOn{$req->{host}} = 0; } elsif ( $nJobs < $Conf{MaxBackups} && (@CmdQueue + $nJobs) <= $Conf{MaxBackups} + $Conf{MaxPendingCmds} && @BgQueue > 0 ) { - my $du; - if ( time - $Info{DUlastValueTime} >= 60 ) { - # - # Update our notion of disk usage no more than - # once every minute - # - $du = $bpc->CheckFileSystemUsage($TopDir); - $Info{DUlastValue} = $du; - $Info{DUlastValueTime} = time; - } else { - # - # if we recently checked it then just use the old value - # - $du = $Info{DUlastValue}; - } - if ( $Info{DUDailyMaxReset} ) { - $Info{DUDailyMaxStartTime} = time; - $Info{DUDailyMaxReset} = 0; - $Info{DUDailyMax} = 0; - } - if ( $du > $Info{DUDailyMax} ) { - $Info{DUDailyMax} = $du; - $Info{DUDailyMaxTime} = time; - } - if ( $du > $Conf{DfMaxUsagePct} ) { - my $nSkip = @BgQueue + @deferBgQueue; - print(LOG $bpc->timeStamp, - "Disk too full ($du%%); skipping $nSkip hosts\n"); - $Info{DUDailySkipHostCnt} += $nSkip; - @BgQueue = (); - @deferBgQueue = (); - %BgQueueOn = (); - next; - } $req = pop(@BgQueue); if ( defined($Jobs{$req->{host}}) ) { - push(@deferBgQueue, $req); + # + # Job is currently running for this host; save it for later + # + unshift(@deferBgQueue, $req); next; } $BgQueueOn{$req->{host}} = 0; } else { - while ( @deferBgQueue ) { - push(@BgQueue, pop(@deferBgQueue)); - } - while ( @deferUserQueue ) { - push(@UserQueue, pop(@deferUserQueue)); - } + # + # Restore the deferred jobs + # + @BgQueue = (@BgQueue, @deferBgQueue); + @UserQueue = (@UserQueue, @deferUserQueue); last; } $host = $req->{host}; @@ -546,7 +690,11 @@ sub Main_TryToRun_Bg_or_User_Queue } else { $progName = "BackupPC_dump"; $type = "backup"; - push(@args, "-d") if ( $req->{dhcp} ); + push(@args, "-I") if ( $req->{backupType} eq "autoIncr" ); + push(@args, "-F") if ( $req->{backupType} eq "autoFull" ); + push(@args, "-i") if ( $req->{backupType} eq "doIncr" ); + push(@args, "-f") if ( $req->{backupType} eq "doFull" ); + push(@args, "-d") if ( $req->{backupType} eq "dhcpPoll" ); push(@args, "-e") if ( $req->{dumpExpire} ); push(@args, $host); } @@ -571,7 +719,7 @@ sub Main_TryToRun_Bg_or_User_Queue 
$Jobs{$host}{startTime} = time; $Jobs{$host}{reqTime} = $req->{reqTime}; $Jobs{$host}{userReq} = $req->{userReq}; - $Jobs{$host}{cmd} = join(" ", $progName, @args); + $Jobs{$host}{cmd} = $bpc->execCmd2ShellCmd($progName, @args); $Jobs{$host}{user} = $user; $Jobs{$host}{type} = $type; $Status{$host}{userReq} = $req->{userReq} @@ -685,39 +833,47 @@ sub Main_Check_Timeout $Info{DUDailySkipHostCntPrev} = $Info{DUDailySkipHostCnt}; $Info{DUDailySkipHostCnt} = 0; my $lastLog = $Conf{MaxOldLogFiles} - 1; - if ( -f "$TopDir/log/LOG.$lastLog" ) { + if ( -f "$LogDir/LOG.$lastLog" ) { print(LOG $bpc->timeStamp, - "Removing $TopDir/log/LOG.$lastLog\n"); - unlink("$TopDir/log/LOG.$lastLog"); + "Removing $LogDir/LOG.$lastLog\n"); + unlink("$LogDir/LOG.$lastLog"); } - if ( -f "$TopDir/log/LOG.$lastLog.z" ) { + if ( -f "$LogDir/LOG.$lastLog.z" ) { print(LOG $bpc->timeStamp, - "Removing $TopDir/log/LOG.$lastLog.z\n"); - unlink("$TopDir/log/LOG.$lastLog.z"); + "Removing $LogDir/LOG.$lastLog.z\n"); + unlink("$LogDir/LOG.$lastLog.z"); } print(LOG $bpc->timeStamp, "Aging LOG files, LOG -> LOG.0 -> " . "LOG.1 -> ... -> LOG.$lastLog\n"); + close(STDERR); # dup of LOG + close(STDOUT); # dup of LOG close(LOG); for ( my $i = $lastLog - 1 ; $i >= 0 ; $i-- ) { my $j = $i + 1; - rename("$TopDir/log/LOG.$i", "$TopDir/log/LOG.$j") - if ( -f "$TopDir/log/LOG.$i" ); - rename("$TopDir/log/LOG.$i.z", "$TopDir/log/LOG.$j.z") - if ( -f "$TopDir/log/LOG.$i.z" ); + rename("$LogDir/LOG.$i", "$LogDir/LOG.$j") + if ( -f "$LogDir/LOG.$i" ); + rename("$LogDir/LOG.$i.z", "$LogDir/LOG.$j.z") + if ( -f "$LogDir/LOG.$i.z" ); } # # Compress the log file LOG -> LOG.0.z (if enabled). # Otherwise, just rename LOG -> LOG.0. # - BackupPC::FileZIO->compressCopy("$TopDir/log/LOG", - "$TopDir/log/LOG.0.z", - "$TopDir/log/LOG.0", + BackupPC::FileZIO->compressCopy("$LogDir/LOG", + "$LogDir/LOG.0.z", + "$LogDir/LOG.0", $Conf{CompressLevel}, 1); LogFileOpen(); # - # Remember to run nightly script after current jobs are done + # Remember to run the nightly script when the next CmdQueue + # job is done. # - $RunNightlyWhenIdle = 1; + if ( $RunNightlyWhenIdle == 2 ) { + print(LOG $bpc->timeStamp, "BackupPC_nightly is still running after 24 hours!!" + . 
" You should adjust the config settings; Skipping this run\n"); + } else { + $RunNightlyWhenIdle = 1; + } } # # Write out the current status and then queue all the PCs @@ -807,6 +963,8 @@ sub Main_Check_Job_Messages if ( $Status{$host}{dhcpCheckCnt} > 0 ); } elsif ( $mesg =~ /^xferPids (.*)/ ) { $Jobs{$host}{xferPid} = $1; + } elsif ( $mesg =~ /^completionPercent (.*)/ ) { + $Jobs{$host}{completionPercent} = $1; } elsif ( $mesg =~ /^started_restore/ ) { $Jobs{$host}{type} = "restore"; print(LOG $bpc->timeStamp, @@ -835,6 +993,7 @@ sub Main_Check_Job_Messages delete($Status{$host}{error}); delete($Status{$host}{errorTime}); $Status{$host}{endTime} = time; + $Status{$host}{lastGoodBackupTime} = time; } elsif ( $mesg =~ /^backups disabled/ ) { print(LOG $bpc->timeStamp, "Ignoring old backup error on $host\n"); @@ -922,33 +1081,34 @@ sub Main_Check_Job_Messages } } elsif ( $mesg =~ /^log\s+(.*)/ ) { print(LOG $bpc->timeStamp, "$1\n"); - } elsif ( $mesg =~ /^BackupPC_stats = (.*)/ ) { - my @f = split(/,/, $1); - $Info{"$f[0]FileCnt"} = $f[1]; - $Info{"$f[0]DirCnt"} = $f[2]; - $Info{"$f[0]Kb"} = $f[3]; - $Info{"$f[0]Kb2"} = $f[4]; - $Info{"$f[0]KbRm"} = $f[5]; - $Info{"$f[0]FileCntRm"} = $f[6]; - $Info{"$f[0]FileCntRep"} = $f[7]; - $Info{"$f[0]FileRepMax"} = $f[8]; - $Info{"$f[0]FileCntRename"} = $f[9]; - $Info{"$f[0]FileLinkMax"} = $f[10]; - $Info{"$f[0]Time"} = time; - printf(LOG "%s%s nightly clean removed %d files of" - . " size %.2fGB\n", - $bpc->timeStamp, ucfirst($f[0]), - $Info{"$f[0]FileCntRm"}, - $Info{"$f[0]KbRm"} / (1000 * 1024)); - printf(LOG "%s%s is %.2fGB, %d files (%d repeated, " - . "%d max chain, %d max links), %d directories\n", - $bpc->timeStamp, ucfirst($f[0]), - $Info{"$f[0]Kb"} / (1000 * 1024), - $Info{"$f[0]FileCnt"}, $Info{"$f[0]FileCntRep"}, - $Info{"$f[0]FileRepMax"}, - $Info{"$f[0]FileLinkMax"}, $Info{"$f[0]DirCnt"}); + } elsif ( $mesg =~ /^BackupPC_stats (\d+) = (.*)/ ) { + my $chunk = int($1 / 16); + my @f = split(/,/, $2); + $Info{pool}{$f[0]}[$chunk]{FileCnt} += $f[1]; + $Info{pool}{$f[0]}[$chunk]{DirCnt} += $f[2]; + $Info{pool}{$f[0]}[$chunk]{Kb} += $f[3]; + $Info{pool}{$f[0]}[$chunk]{Kb2} += $f[4]; + $Info{pool}{$f[0]}[$chunk]{KbRm} += $f[5]; + $Info{pool}{$f[0]}[$chunk]{FileCntRm} += $f[6]; + $Info{pool}{$f[0]}[$chunk]{FileCntRep} += $f[7]; + $Info{pool}{$f[0]}[$chunk]{FileRepMax} = $f[8] + if ( $Info{pool}{$f[0]}[$chunk]{FileRepMax} < $f[8] ); + $Info{pool}{$f[0]}[$chunk]{FileCntRename} += $f[9]; + $Info{pool}{$f[0]}[$chunk]{FileLinkMax} = $f[10] + if ( $Info{pool}{$f[0]}[$chunk]{FileLinkMax} < $f[10] ); + $Info{pool}{$f[0]}[$chunk]{FileLinkTotal} += $f[11]; + $Info{pool}{$f[0]}[$chunk]{Time} = time; } elsif ( $mesg =~ /^BackupPC_nightly lock_off/ ) { - $RunNightlyWhenIdle = 0; + $BackupPCNightlyLock--; + if ( $BackupPCNightlyLock == 0 ) { + # + # This means the last BackupPC_nightly is done with + # the pool clean, so it's ok to start running regular + # backups again. But starting in 3.0 regular jobs + # are decoupled from BackupPC_nightly. 
+ # + $RunNightlyWhenIdle = 0; + } } elsif ( $mesg =~ /^processState\s+(.+)/ ) { $Jobs{$host}{processState} = $1; } elsif ( $mesg =~ /^link\s+(.+)/ ) { @@ -964,15 +1124,84 @@ sub Main_Check_Job_Messages if ( $nbytes <= 0 ) { close($Jobs{$host}{fh}); vec($FDread, $Jobs{$host}{fn}, 1) = 0; - if ( $CmdJob eq $host ) { + if ( $CmdJob eq $host || $bpc->isAdminJob($host) ) { my $cmd = $Jobs{$host}{cmd}; $cmd =~ s/$BinDir\///g; print(LOG $bpc->timeStamp, "Finished $host ($cmd)\n"); $Status{$host}{state} = "Status_idle"; $Status{$host}{endTime} = time; - $CmdJob = ""; - $RunNightlyWhenIdle = 0 if ( $cmd eq "BackupPC_nightly" - && $RunNightlyWhenIdle ); + if ( $cmd =~ /^BackupPC_nightly\s/ ) { + $BackupPCNightlyJobs--; + #print(LOG $bpc->timeStamp, "BackupPC_nightly done; now" + # . " have $BackupPCNightlyJobs running\n"); + if ( $BackupPCNightlyJobs <= 0 ) { + # + # Last BackupPC_nightly has finished + # + $BackupPCNightlyJobs = 0; + $RunNightlyWhenIdle = 0; + $CmdJob = ""; + # + # Combine the 16 per-directory results + # + for my $p ( qw(pool cpool) ) { + $Info{"${p}FileCnt"} = 0; + $Info{"${p}DirCnt"} = 0; + $Info{"${p}Kb"} = 0; + $Info{"${p}Kb2"} = 0; + $Info{"${p}KbRm"} = 0; + $Info{"${p}FileCntRm"} = 0; + $Info{"${p}FileCntRep"} = 0; + $Info{"${p}FileRepMax"} = 0; + $Info{"${p}FileCntRename"} = 0; + $Info{"${p}FileLinkMax"} = 0; + $Info{"${p}Time"} = 0; + for ( my $i = 0 ; $i < 16 ; $i++ ) { + $Info{"${p}FileCnt"} + += $Info{pool}{$p}[$i]{FileCnt}; + $Info{"${p}DirCnt"} + += $Info{pool}{$p}[$i]{DirCnt}; + $Info{"${p}Kb"} + += $Info{pool}{$p}[$i]{Kb}; + $Info{"${p}Kb2"} + += $Info{pool}{$p}[$i]{Kb2}; + $Info{"${p}KbRm"} + += $Info{pool}{$p}[$i]{KbRm}; + $Info{"${p}FileCntRm"} + += $Info{pool}{$p}[$i]{FileCntRm}; + $Info{"${p}FileCntRep"} + += $Info{pool}{$p}[$i]{FileCntRep}; + $Info{"${p}FileRepMax"} + = $Info{pool}{$p}[$i]{FileRepMax} + if ( $Info{"${p}FileRepMax"} < + $Info{pool}{$p}[$i]{FileRepMax} ); + $Info{"${p}FileCntRename"} + += $Info{pool}{$p}[$i]{FileCntRename}; + $Info{"${p}FileLinkMax"} + = $Info{pool}{$p}[$i]{FileLinkMax} + if ( $Info{"${p}FileLinkMax"} < + $Info{pool}{$p}[$i]{FileLinkMax} ); + $Info{"${p}Time"} = $Info{pool}{$p}[$i]{Time} + if ( $Info{"${p}Time"} < + $Info{pool}{$p}[$i]{Time} ); + } + printf(LOG "%s%s nightly clean removed %d files of" + . " size %.2fGB\n", + $bpc->timeStamp, ucfirst($p), + $Info{"${p}FileCntRm"}, + $Info{"${p}KbRm"} / (1000 * 1024)); + printf(LOG "%s%s is %.2fGB, %d files (%d repeated, " + . 
"%d max chain, %d max links), %d directories\n", + $bpc->timeStamp, ucfirst($p), + $Info{"${p}Kb"} / (1000 * 1024), + $Info{"${p}FileCnt"}, $Info{"${p}FileCntRep"}, + $Info{"${p}FileRepMax"}, + $Info{"${p}FileLinkMax"}, $Info{"${p}DirCnt"}); + } + } + } else { + $CmdJob = ""; + } } else { # # Queue BackupPC_link to complete the backup @@ -1060,7 +1289,7 @@ sub Main_Check_Client_Messages last; } $Clients{$client}{mesgCnt}++; - $cmd = $2; + $cmd = decode_utf8($2); if ( $cmd =~ /^stop (\S+)\s+(\S+)\s+(\S*)/ ) { $host = $1; my $user = $2; @@ -1118,46 +1347,68 @@ sub Main_Check_Client_Messages } } elsif ( $cmd =~ /^backup all$/ ) { QueueAllPCs(); + } elsif ( $cmd =~ /^BackupPC_nightly run$/ ) { + $RunNightlyWhenIdle = 1; } elsif ( $cmd =~ /^backup (\S+)\s+(\S+)\s+(\S+)\s+(\S+)/ ) { - my $hostIP = $1; - $host = $2; - my $user = $3; - my $doFull = $4; - $host = $bpc->uriUnesc($host); - $hostIP = $bpc->uriUnesc($hostIP); - if ( !defined($Status{$host}) ) { + my $hostIP = $1; + $host = $2; + my $user = $3; + my $backupType = $4; + $host = $bpc->uriUnesc($host); + $hostIP = $bpc->uriUnesc($hostIP); + if ( !defined($Hosts->{$host}) ) { print(LOG $bpc->timeStamp, "User $user requested backup of unknown host" . " $host\n"); $reply = "error: unknown host $host"; - } elsif ( defined($Jobs{$host}) - && $Jobs{$host}{type} ne "restore" ) { - print(LOG $bpc->timeStamp, - "User $user requested backup of $host," - . " but one is currently running\n"); - $reply = "error: backup of $host is already running"; } else { - print(LOG $bpc->timeStamp, - "User $user requested backup of $host" - . " ($hostIP)\n"); - if ( $BgQueueOn{$hostIP} ) { - @BgQueue = grep($_->{host} ne $hostIP, @BgQueue); - $BgQueueOn{$hostIP} = 0; - } - if ( $UserQueueOn{$hostIP} ) { - @UserQueue = grep($_->{host} ne $hostIP, @UserQueue); - $UserQueueOn{$hostIP} = 0; + # + # Handle numeric backupType for backward compatibility + # (technically -1 is a new feature for auto) + # + $backupType = 'auto' if ( $backupType eq '-1' ); + $backupType = 'doIncr' if ( $backupType eq '0' ); + $backupType = 'doFull' if ( $backupType eq '1' ); + if ( $backupType !~ /^doIncr|doFull|autoIncr|autoFull|auto$/i ) { + $reply = "error: unknown backup type $backupType"; + } else { + print(LOG $bpc->timeStamp, + "User $user requested backup of $host" + . " ($hostIP)\n"); + if ( $BgQueueOn{$hostIP} ) { + @BgQueue = grep($_->{host} ne $hostIP, @BgQueue); + $BgQueueOn{$hostIP} = 0; + } + if ( $UserQueueOn{$hostIP} ) { + @UserQueue = grep($_->{host} ne $hostIP, @UserQueue); + $UserQueueOn{$hostIP} = 0; + } + my $status = QueueOnePC($host, $hostIP, $user, 'user', $backupType); + if ( $status == 0 ) { + $reply = "ok: requested backup of $host ($backupType)"; + } elsif ( $status == 1 ) { + #should never see this we just dequeued it + $reply = "warning: $host was already queued." + . " Ignoring this request"; + } elsif ( $status == 2 ) { + print(LOG $bpc->timeStamp, + "Disk too full ($Info{DUlastValue}%)." + . " Not queueing backup of $host\n"); + $reply = "error: disk too full ($Info{DUlastValue}%)"; + $Info{DUDailySkipHostCnt}++; + } elsif ( $status == 3 ) { + # should never reach this because + # it's set to "user" above + $reply = "error: unknown queue name"; + } else { + $reply = "error: unknown queue status $status"; + if ( $BgQueueOn{$hostIP} || $UserQueueOn{$hostIP} ) { + $reply .= ". Host is queued."; + } else { + $reply .= ". 
Host is not queued."; + } + } } - unshift(@UserQueue, { - host => $hostIP, - user => $user, - reqTime => time, - doFull => $doFull, - userReq => 1, - dhcp => $hostIP eq $host ? 0 : 1, - }); - $UserQueueOn{$hostIP} = 1; - $reply = "ok: requested backup of $host"; } } elsif ( $cmd =~ /^archive (\S+)\s+(\S+)\s+(\S+)/ ) { my $user = $1; @@ -1178,7 +1429,7 @@ sub Main_Check_Client_Messages } else { unshift(@UserQueue, { host => $host, - hostIP => $user, + user => $user, reqFileName => $reqFileName, reqTime => time, dhcp => 0, @@ -1196,7 +1447,7 @@ sub Main_Check_Client_Messages my $reqFileName = $4; $host = $bpc->uriUnesc($host); $hostIP = $bpc->uriUnesc($hostIP); - if ( !defined($Status{$host}) ) { + if ( !defined($Hosts->{$host}) ) { print(LOG $bpc->timeStamp, "User $user requested restore to unknown host" . " $host"); @@ -1365,7 +1616,7 @@ sub Main_Check_Client_Messages ########################################################################### # -# Write the current status to $TopDir/log/status.pl +# Write the current status to $LogDir/status.pl # sub StatusWrite { @@ -1373,70 +1624,145 @@ sub StatusWrite [ \%Info, \%Status], [qw(*Info *Status)]); $dump->Indent(1); - if ( open(STATUS, ">", "$TopDir/log/status.pl") ) { - print(STATUS $dump->Dump); - close(STATUS); - } + my $text = $dump->Dump; + $bpc->{storage}->TextFileWrite("$LogDir/status.pl", $text); } # -# Queue all the hosts for backup. This means queuing all the fixed -# ip hosts and all the dhcp address ranges. We also additionally -# queue the dhcp hosts with a -e flag to check for expired dumps. +# Compare function for host sort. Hosts with errors go first, +# sorted with the oldest errors first. The remaining hosts +# are sorted so that those with the oldest backups go first. # -sub QueueAllPCs +sub HostSortCompare { - foreach my $host ( sort(keys(%$Hosts)) ) { - delete($Status{$host}{backoffTime}) - if ( defined($Status{$host}{backoffTime}) + # + # Hosts with errors go before hosts without errors + # + return -1 if ( $Status{$a}{error} ne "" && $Status{$b}{error} eq "" ); + + # + # Hosts with no errors go after hosts with errors + # + return 1 if ( $Status{$a}{error} eq "" && $Status{$b}{error} ne "" ); + + # + # hosts with the older last good backups sort earlier + # + my $r = $Status{$a}{lastGoodBackupTime} <=> $Status{$b}{lastGoodBackupTime}; + return $r if ( $r ); + + # + # Finally, just sort based on host name + # + return $a cmp $b; +} + +# +# Attempt to queue a host. 
+# Returns 0 on success; 1 if host is already queued; +# 2 if host was skipped; 3 on invalid queue name +# +# $host is the client's host name +# $hostIP is usually the client's host name too, or IP address +# if the user specified it in the manual backup command +# $user is the user name, or BackupPC by default +# $queue is which queue to use ("bg" by default) +# $backupType is the backup type (doIncr|doFull|autoIncr|autoFull|auto|dhcpPoll) +# +# Note: starting in 3.2.0, the PC is queued even if it has a current +# job running +# +sub QueueOnePC +{ + my($host, $hostIP, $user, $queue, $backupType) = @_; + my $retVal = 0; + $user = "BackupPC" if ( $user eq '' ); + $queue = "bg" if ( $queue eq '' && $user eq 'BackupPC' ); + $backupType = "auto" if ( $backupType eq '' ); + + delete($Status{$host}{backoffTime}) + if ( defined($Status{$host}{backoffTime}) && $Status{$host}{backoffTime} < time ); - next if ( defined($Jobs{$host}) - || $BgQueueOn{$host} - || $UserQueueOn{$host} - || $CmdQueueOn{$host} ); - if ( $Hosts->{$host}{dhcp} ) { - $Status{$host}{dhcpCheckCnt}++; - if ( $RunNightlyWhenIdle ) { - # - # Once per night queue a check for DHCP hosts that just - # checks for expired dumps. We need to do this to handle - # the case when a DHCP host has not been on the network for - # a long time, and some of the old dumps need to be expired. - # Normally expiry checks are done by BackupPC_dump only - # after the DHCP hosts has been detected on the network. - # - unshift(@BgQueue, - {host => $host, user => "BackupPC", reqTime => time, - dhcp => 0, dumpExpire => 1}); - $BgQueueOn{$host} = 1; - } - } else { + return 1 if ( $BgQueueOn{$host} || $UserQueueOn{$host} ); + if ( $Hosts->{$host}{dhcp} ) { + $Status{$host}{dhcpCheckCnt}++; + if ( $RunNightlyWhenIdle ) { # - # this is a fixed ip host: queue it + # Once per night queue a check for DHCP hosts that just + # checks for expired dumps. We need to do this to handle + # the case when a DHCP host has not been on the network for + # a long time, and some of the old dumps need to be expired. + # Normally expiry checks are done by BackupPC_dump only + # after the DHCP hosts has been detected on the network. # unshift(@BgQueue, - {host => $host, user => "BackupPC", reqTime => time, - dhcp => $Hosts->{$host}{dhcp}}); + {host => $hostIP, user => $user, reqTime => time, + dhcp => 0, dumpExpire => 1}); $BgQueueOn{$host} = 1; } + } else { + # + # this is a fixed ip host or DHCP ip address: queue it + # + if ( $Info{DUlastValue} > $Conf{DfMaxUsagePct} ) { + # + # Since we are out of disk space, instead of queuing + # a regular job, queue an expire check instead. That + # way if the admin reduces the number of backups to + # keep then we will actually delete them. Otherwise + # BackupPC_dump will never run since we have exceeded + # the limit. + # + $retVal = 2; + unshift(@BgQueue, + {host => $hostIP, user => $user, reqTime => time, dumpExpire => 1}); + $BgQueueOn{$host} = 1; + } elsif( $queue eq 'bg' ) { + # + # Queue regular background backup + # + unshift(@BgQueue, + {host => $hostIP, user => $user, reqTime => time, backupType => $backupType}); + $BgQueueOn{$host} = 1; + } elsif( $queue eq 'user' ) { + # + # Queue user backup + # + unshift(@UserQueue, + {host => $hostIP, user => $user, reqTime => time, backupType => $backupType}); + $UserQueueOn{$host} = 1; + } else { + # unknown $queue type + $retVal = 3; + } + } + + return $retVal; +} + +# +# Queue all the hosts for backup. This means queuing all the fixed +# ip hosts and all the dhcp address ranges. 
We also additionally +# queue the dhcp hosts with a -e flag to check for expired dumps. +# +sub QueueAllPCs +{ + my $nSkip = 0; + + foreach my $host ( sort(HostSortCompare keys(%$Hosts)) ) { + $nSkip++ if ( QueueOnePC($host, $host, 'BackupPC', 'bg', 'auto') == 2 ); } foreach my $dhcp ( @{$Conf{DHCPAddressRanges}} ) { for ( my $i = $dhcp->{first} ; $i <= $dhcp->{last} ; $i++ ) { my $ipAddr = "$dhcp->{ipAddrBase}.$i"; - next if ( defined($Jobs{$ipAddr}) - || $BgQueueOn{$ipAddr} - || $UserQueueOn{$ipAddr} - || $CmdQueueOn{$ipAddr} ); - # - # this is a potential dhcp ip address (we don't know the - # host name yet): queue it - # - unshift(@BgQueue, - {host => $ipAddr, user => "BackupPC", reqTime => time, - dhcp => 1}); - $BgQueueOn{$ipAddr} = 1; + $nSkip++ if ( QueueOnePC($ipAddr, $ipAddr, 'BackupPC', 'bg', 'dhcpPoll') == 2 ); } } + if ( $nSkip ) { + print(LOG $bpc->timeStamp, + "Disk too full ($Info{DUlastValue}%); skipped $nSkip hosts\n"); + $Info{DUDailySkipHostCnt} += $nSkip; + } } # @@ -1492,7 +1818,7 @@ sub HostsUpdate } foreach my $host ( sort(keys(%Status)) ) { next if ( $host eq $bpc->trashJob - || $host eq $bpc->adminJob + || $bpc->isAdminJob($host) || defined($Hosts->{$host}) || defined($Jobs{$host}) || $BgQueueOn{$host} @@ -1520,7 +1846,7 @@ sub catch_signal close(LOG); LogFileOpen(); print(LOG "Fatal error: unhandled signal $SigName\n"); - unlink("$TopDir/log/BackupPC.pid"); + unlink("$LogDir/BackupPC.pid"); confess("Got new signal $SigName... quitting\n"); } else { $SigName = shift; @@ -1532,9 +1858,9 @@ sub catch_signal # sub LogFileOpen { - mkpath("$TopDir/log", 0, 0777) if ( !-d "$TopDir/log" ); - open(LOG, ">>$TopDir/log/LOG") - || die("Can't create LOG file $TopDir/log/LOG"); + mkpath($LogDir, 0, 0777) if ( !-d $LogDir ); + open(LOG, ">>$LogDir/LOG") + || die("Can't create LOG file $LogDir/LOG"); close(STDOUT); close(STDERR); open(STDOUT, ">&LOG"); @@ -1559,7 +1885,7 @@ sub ServerSocketInit print(LOG $bpc->timeStamp, "unix socket() failed: $!\n"); exit(1); } - my $sockFile = "$TopDir/log/BackupPC.sock"; + my $sockFile = "$LogDir/BackupPC.sock"; unlink($sockFile); if ( !bind(SERVER_UNIX, sockaddr_un($sockFile)) ) { print(LOG $bpc->timeStamp, "unix bind() failed: $!\n"); @@ -1640,8 +1966,9 @@ sub ServerShutdown } %Jobs = (); } + delete($Info{pid}); StatusWrite(); - unlink("$TopDir/log/BackupPC.pid"); + unlink("$LogDir/BackupPC.pid"); exit(1); }
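
Worth calling out first: with -d the server now daemonizes using the double-fork/setsid recipe referenced in the Debian bug report cited in the new comment. The standalone Perl sketch below is an illustration only, not the commit's code; it opens /dev/null read-only for stdin (the change above opens it for writing) and omits the STDOUT/STDERR redirection that BackupPC performs later in LogFileOpen().

    #!/usr/bin/perl
    # Sketch of the double-fork daemonization pattern used for the -d option.
    use strict;
    use warnings;
    use POSIX qw(setsid);

    sub daemonize
    {
        my $pid;
        defined($pid = fork) or die("Can't fork: $!");
        exit if ( $pid );          # first parent exits
        setsid();                  # become session leader, detach from the controlling tty
        defined($pid = fork) or die("Can't fork: $!");
        exit if ( $pid );          # second parent exits; child can no longer reacquire a tty
        chdir("/") or die("Cannot chdir to /: $!\n");
        close(STDIN);
        open(STDIN, "<", "/dev/null") or die("Cannot open /dev/null as stdin\n");
    }

    daemonize();
    # ... the long-running daemon work would continue here ...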
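
The largest behavioural change is that BackupPC_nightly is now split across $Conf{MaxBackupPCNightlyJobs} parallel jobs and, via $Conf{BackupPCNightlyPeriod}, across several nights. The standalone sketch below (not part of the commit; the config values are made-up examples) walks through the same start/end arithmetic as Main_TryToRun_nightly to show which 0..255 pool ranges each job is handed.

    #!/usr/bin/perl
    # Sketch: how the 16 top-level pool directories (0..9a..f) are partitioned.
    use strict;
    use warnings;

    my $BackupPCNightlyPeriod  = 2;   # cover half of the pool each night
    my $MaxBackupPCNightlyJobs = 4;   # four parallel BackupPC_nightly processes
    my $NightlyPhase           = 0;   # which night we are on (0 .. Period-1)

    # Same start/end computation as in the diff, on a 0..15 scale.
    my $start = int($NightlyPhase       * 16 / $BackupPCNightlyPeriod);
    my $end   = int(($NightlyPhase + 1) * 16 / $BackupPCNightlyPeriod);
    $end = $start + 1 if ( $end <= $start );

    # Refine to a 0..255 scale and split among the parallel jobs.
    $start *= 16;
    $end   *= 16;
    my $start0 = $start;
    for ( my $i = 0 ; $i < $MaxBackupPCNightlyJobs ; $i++ ) {
        my @cmd = ("BackupPC_nightly");
        push(@cmd, "-m") if ( $i == 0 );   # only the first job does email and log aging
        push(@cmd, $start);
        $start = $start0 + int(($end - $start0) * ($i + 1) / $MaxBackupPCNightlyJobs);
        push(@cmd, $start - 1);
        print "job $i: @cmd\n";
    }
    # With these example settings the jobs get ranges 0-31, 32-63, 64-95 and
    # 96-127; the next night NightlyPhase advances and 128-255 is covered.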
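
Disk-usage handling also changed: usage is now sampled at most once every 10 minutes (previously every minute), and when it exceeds $Conf{DfMaxUsagePct} the background queue is no longer emptied wholesale; requests flagged dumpExpire are kept so over-retention backups can still be pruned. A tiny standalone sketch of that filtering, with dummy data and simplified bookkeeping:

    #!/usr/bin/perl
    # Sketch: keep only expire-only requests when the pool disk is too full.
    use strict;
    use warnings;

    my $DfMaxUsagePct = 95;
    my $du            = 97;     # pretend df reported 97% usage
    my @BgQueue = (
        { host => "alpha", dumpExpire => 0 },
        { host => "beta",  dumpExpire => 1 },
        { host => "gamma", dumpExpire => 0 },
    );

    if ( $du > $DfMaxUsagePct ) {
        my $nSkip = 0;
        my @keep;
        foreach my $req ( @BgQueue ) {
            if ( $req->{dumpExpire} ) { push(@keep, $req) } else { $nSkip++ }
        }
        @BgQueue = @keep;
        print "Disk too full ($du%); skipped $nSkip hosts, kept ",
              scalar(@BgQueue), " expire-only request(s)\n";
    }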
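
Log aging is unchanged in structure but is now keyed off $LogDir rather than $TopDir/log. As a reminder of the rotation scheme, here is a simplified standalone sketch (assumed example path, no compression step; BackupPC itself uses BackupPC::FileZIO->compressCopy for the final LOG -> LOG.0.z step when compression is enabled):

    #!/usr/bin/perl
    # Sketch: age LOG -> LOG.0 -> LOG.1 -> ... -> LOG.(N-1), dropping the oldest.
    use strict;
    use warnings;

    my $LogDir     = "/var/log/example";   # assumed path for the sketch
    my $maxOldLogs = 14;
    my $lastLog    = $maxOldLogs - 1;

    unlink("$LogDir/LOG.$lastLog")   if ( -f "$LogDir/LOG.$lastLog" );
    unlink("$LogDir/LOG.$lastLog.z") if ( -f "$LogDir/LOG.$lastLog.z" );
    for ( my $i = $lastLog - 1 ; $i >= 0 ; $i-- ) {
        my $j = $i + 1;
        rename("$LogDir/LOG.$i",   "$LogDir/LOG.$j")   if ( -f "$LogDir/LOG.$i" );
        rename("$LogDir/LOG.$i.z", "$LogDir/LOG.$j.z") if ( -f "$LogDir/LOG.$i.z" );
    }
    rename("$LogDir/LOG", "$LogDir/LOG.0") if ( -f "$LogDir/LOG" );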
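
Finally, each BackupPC_nightly job now reports per-chunk pool statistics ("BackupPC_stats <n> = ...") that the server accumulates into $Info{pool}{...}[0..15] and rolls up when the last job exits. The simplified sketch below covers only a few of the real counters and uses dummy data; it shows the roll-up rule: counts and sizes are summed across the 16 chunks, while the chain/link maxima keep the largest per-chunk value.

    #!/usr/bin/perl
    # Sketch: combine per-chunk pool stats into the overall pool/cpool totals.
    use strict;
    use warnings;

    my %Info;
    # Dummy per-chunk data standing in for what the parallel jobs report.
    for my $p ( qw(pool cpool) ) {
        for my $i ( 0 .. 15 ) {
            $Info{pool}{$p}[$i] = { FileCnt => 1000 + $i, Kb => 50_000, FileRepMax => $i % 5 };
        }
    }
    for my $p ( qw(pool cpool) ) {
        $Info{"${p}FileCnt"} = $Info{"${p}Kb"} = $Info{"${p}FileRepMax"} = 0;
        for my $i ( 0 .. 15 ) {
            my $chunk = $Info{pool}{$p}[$i];
            $Info{"${p}FileCnt"}   += $chunk->{FileCnt};    # counters are summed
            $Info{"${p}Kb"}        += $chunk->{Kb};
            $Info{"${p}FileRepMax"} = $chunk->{FileRepMax}  # maxima take the largest value
                if ( $Info{"${p}FileRepMax"} < $chunk->{FileRepMax} );
        }
        printf("%s: %d files, %.2fGB, max chain %d\n",
               $p, $Info{"${p}FileCnt"}, $Info{"${p}Kb"} / (1000 * 1024),
               $Info{"${p}FileRepMax"});
    }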