diff --git a/bin/BackupPC_updatedb b/bin/BackupPC_updatedb
index 233f4ee..6f778ca 100755
--- a/bin/BackupPC_updatedb
+++ b/bin/BackupPC_updatedb
@@ -12,9 +12,10 @@
 use Time::HiRes qw/time/;
 use File::Pid;
 use POSIX qw/strftime/;
 use BackupPC::SearchLib;
+use Cwd qw/abs_path/;
 
 use constant BPC_FTYPE_DIR => 5;
-use constant EST_CHUNK => 100000;
+use constant EST_CHUNK => 4096;
 
 # daylight saving time change offset for 1h
 my $dst_offset = 60 * 60;
@@ -24,7 +25,12 @@ $|=1;
 
 my $start_t = time();
 
-my $pidfile = new File::Pid;
+my $pid_path = abs_path($0);
+$pid_path =~ s/\W+/_/g;
+
+my $pidfile = new File::Pid({
+	file => "/tmp/$pid_path",
+});
 
 if (my $pid = $pidfile->running ) {
 	die "$0 already running: $pid\n";
@@ -32,8 +38,8 @@ if (my $pid = $pidfile->running ) {
 	$pidfile->remove;
 	$pidfile = new File::Pid;
 }
-$pidfile->write;
 print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
+$pidfile->write;
 
 my $t_fmt = '%Y-%m-%d %H:%M:%S';
 
@@ -46,14 +52,13 @@ my $beenThere = {};
 my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
 my $user = $Conf{SearchUser} || '';
 
-my $use_hest = $Conf{HyperEstraierIndex};
-my ($index_path, $index_node_url) = BackupPC::SearchLib::getHyperEstraier_url($use_hest);
+my $index_node_url = $Conf{HyperEstraierIndex};
 
 my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
 
 my %opt;
 
-if ( !getopts("cdm:v:ijf", \%opt ) ) {
+if ( !getopts("cdm:v:ijfq", \%opt ) ) {
 	print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v num] [-i|-j|-f]
+usage: $0 [-c|-d] [-m num] [-v num] [-i|-j|-f|-q]
 EOF
 	exit 1;
 }
 
-sub signal {
-	if ($hest_db) {
-		$hest_db->sync();
-		$hest_db->close();
-	}
-	exit(0);
-}
-
-$SIG{'INT'}  = \&signal;
-$SIG{'QUIT'} = \&signal;
-
 sub hest_update {
 
 	my ($host_id, $share_id, $num) = @_;
 
 	my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";
 
-	unless ($use_hest) {
-		print STDERR "HyperEstraier support not enabled in configuration\n";
+	unless ($index_node_url && $index_node_url =~ m#^http://#) {
+		print STDERR "HyperEstraier support not enabled or index node invalid\n" if ($debug);
+		$index_node_url = 0;
 		return;
 	}
 
-	print curr_time," updating HyperEstraier:";
+	print curr_time," updating Hyper Estraier:";
 
 	my $t = time();
 
 	my $offset = 0;
 	my $added = 0;
 
-	print " opening index $use_hest";
-	if ($index_path) {
-		$hest_db = HyperEstraier::Database->new();
-		$hest_db->open($TopDir . $index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
-		print " directly";
-	} elsif ($index_node_url) {
-		$hest_node ||= HyperEstraier::Node->new($index_node_url);
-		$hest_node->set_auth('admin', 'admin');
+	if ($index_node_url) {
+		print " opening index $index_node_url";
+		$hest_node ||= Search::Estraier::Node->new(
+			url => $index_node_url,
+			user => 'admin',
+			passwd => 'admin',
+			croak_on_error => 1,
+		);
 		print " via node URL";
-	} else {
-		die "don't know how to use HyperEstraier Index $use_hest";
 	}
-	print " increment is " . EST_CHUNK . " files:";
 
 	my $results = 0;
 
@@ -192,7 +182,9 @@ sub hest_update {
 
 		if ($results == 0) {
 			print " - no new files\n";
-			last;
+			return;
+		} else {
+			print "...";
 		}
 
 		sub fmt_date {
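Side note on the hest_update() changes above: the patch drops the bundled HyperEstraier bindings (and the direct $index_path mode) in favour of the CPAN Search::Estraier module, talking to the index only through a node URL. A minimal sketch for checking what hest_update() has pushed into such a node; the node URL is an assumption standing in for a typical $Conf{HyperEstraierIndex} value, while the admin/admin credentials mirror the ones hard-coded above:

	use strict;
	use warnings;
	use Search::Estraier;

	# node URL is an assumption -- substitute your $Conf{HyperEstraierIndex}
	my $node = Search::Estraier::Node->new(
		url            => 'http://localhost:1978/node/backuppc',
		user           => 'admin',
		passwd         => 'admin',
		croak_on_error => 1,
	);

	# full-text query over the indexed file names
	my $cond = Search::Estraier::Condition->new;
	$cond->set_phrase($ARGV[0] || 'passwd');
	$cond->set_max(10);

	my $nres = $node->search($cond, 0);
	for my $i ( 0 .. $nres->doc_num - 1 ) {
		my $rdoc = $nres->get_doc($i);
		# '@uri' holds "hname:sname#backupnum filepath" as built below
		print $rdoc->attr('@uri'), "\n";
	}

Since put_doc() replaces a document that already carries the same '@uri', re-running hest_update() over the same rows is harmless, which is presumably why the weaker uri_to_id() check in the next hunk is acceptable.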
@@ -204,21 +196,20 @@ sub hest_update {
 
 		while (my $row = $sth->fetchrow_hashref()) {
 
-			my $fid = $row->{'fid'} || die "no fid?";
-			my $uri = 'file:///' . $fid;
-
-			unless ($skip_check) {
-				my $id = ($hest_db || $hest_node)->uri_to_id($uri);
-				next unless ($id == -1);
+			my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath};
+			if (! $skip_check && $hest_node) {
+				my $id = $hest_node->uri_to_id($uri);
+				next if ($id && $id == -1);
 			}
 
 			# create a document object
-			my $doc = HyperEstraier::Document->new;
+			my $doc = Search::Estraier::Document->new;
 
 			# add attributes to the document object
 			$doc->add_attr('@uri', $uri);
 
 			foreach my $c (@{ $sth->{NAME} }) {
+				print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
 				$doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
 			}
 
@@ -233,28 +224,17 @@ sub hest_update {
 			print STDERR $doc->dump_draft,"\n" if ($debug > 1);
 
 			# register the document object to the database
-			if ($hest_db) {
-				$hest_db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
-			} elsif ($hest_node) {
-				$hest_node->put_doc($doc);
-			} else {
-				die "not supported";
-			}
+			$hest_node->put_doc($doc) if ($hest_node);
+
 			$added++;
 		}
 
-		print " $added";
-		$hest_db->sync() if ($index_path);
+		print "$added";
 
 		$offset += EST_CHUNK;
 
 	} while ($results == EST_CHUNK);
 
-	if ($index_path) {
-		print ", close";
-		$hest_db->close();
-	}
-
 	my $dur = (time() - $t) || 1;
 	printf(" [%.2f/s dur: %s]\n",
 		( $added / $dur ),
@@ -266,10 +246,9 @@ sub hest_update {
 
 ## update index ##
 
-if (($opt{i} || $opt{j} || ($index_path && ! -e $TopDir . $index_path)) && !$opt{c}) {
+if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
 	# update all
-	print "force update of HyperEstraier index ";
-	print "importing existing data" unless (-e $TopDir . $index_path);
+	print "force update of Hyper Estraier index ";
 	print "by -i flag" if ($opt{i});
 	print "by -j flag" if ($opt{j});
 	print "\n";
@@ -320,7 +299,7 @@ if ($opt{c}) {
 		size		bigint		not null,
 		inc_size	bigint		not null default -1,
 		inc_deleted	boolean		default false,
-		parts		integer		not null default 1,
+		parts		integer		not null default 0,
 		PRIMARY KEY(id)
 	);
 
@@ -389,7 +368,7 @@ if ($opt{c}) {
 		files:size
 		archive:dvd_nr
 		archive_burned:archive_id
-		backup_parts:backup_id,part_nr
+		backup_parts:backup_id,part_nr:unique
 	)) {
 		do_index($index);
 	}
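The trigger hunk that follows enforces that backups.parts always matches the number of backup_parts rows for that backup, and that part numbers arrive sequentially; anything else aborts the transaction. A hypothetical DBI sketch of what the second trigger accepts and rejects; the DSN, user, backup id and the two-column insert are assumptions (the real backup_parts schema is created elsewhere by -c):

	use strict;
	use warnings;
	use DBI;

	# DSN and user are placeholders -- the script itself uses $Conf{SearchDSN}
	my $dbh = DBI->connect('dbi:Pg:dbname=backuppc', 'hest', '',
		{ RaiseError => 1, AutoCommit => 0 });

	my $backup_id = 42;	# hypothetical existing backups.id with no parts yet

	# column list trimmed to the two columns the triggers inspect
	my $ins = $dbh->prepare(q{
		insert into backup_parts (backup_id, part_nr) values (?, ?)
	});

	# sequential part numbers pass: after each AFTER INSERT trigger run,
	# part_nr equals count(*) of backup_parts for this backup
	$ins->execute($backup_id, $_) foreach ( 1 .. 3 );

	# a gap (part_nr 5 while only 3 rows exist) raises the exception
	eval { $ins->execute($backup_id, 5) };
	print "rejected: $@" if ($@);

	$dbh->rollback;	# demo only, leave the database unchanged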
@@ -400,6 +379,66 @@ if ($opt{c}) {
 		$dbh->do( qq{ CREATE SEQUENCE $seq } );
 	}
 
+	print " creating triggers ";
+	$dbh->do( <<__END_OF_TRIGGER__ );
+
+create or replace function backup_parts_check() returns trigger as '
+declare
+	b_parts integer;
+	b_counted integer;
+	b_id	integer;
+begin
+	-- raise notice ''old/new parts %/% backup_id %/%'', old.parts, new.parts, old.id, new.id;
+	if (TG_OP=''UPDATE'') then
+		b_id := new.id;
+		b_parts := new.parts;
+	elsif (TG_OP = ''INSERT'') then
+		b_id := new.id;
+		b_parts := new.parts;
+	end if;
+	b_counted := (select count(*) from backup_parts where backup_id = b_id);
+	-- raise notice ''backup % parts %'', b_id, b_parts;
+	if ( b_parts != b_counted ) then
+		raise exception ''Update of backup % aborted, requested % parts and there are really % parts'', b_id, b_parts, b_counted;
+	end if;
+	return null;
+end;
+' language plpgsql;
+
+create trigger do_backup_parts_check
+	after insert or update or delete on backups
+	for each row execute procedure backup_parts_check();
+
+create or replace function backup_backup_parts_check() returns trigger as '
+declare
+	b_id		integer;
+	my_part_nr	integer;
+	calc_part	integer;
+begin
+	if (TG_OP = ''INSERT'') then
+		-- raise notice ''trigger: % backup_id %'', TG_OP, new.backup_id;
+		b_id = new.backup_id;
+		my_part_nr = new.part_nr;
+		execute ''update backups set parts = parts + 1 where id = '' || b_id;
+	elsif (TG_OP = ''DELETE'') then
+		-- raise notice ''trigger: % backup_id %'', TG_OP, old.backup_id;
+		b_id = old.backup_id;
+		my_part_nr = old.part_nr;
+		execute ''update backups set parts = parts - 1 where id = '' || b_id;
+	end if;
+	calc_part := (select count(part_nr) from backup_parts where backup_id = b_id);
+	if ( my_part_nr != calc_part ) then
+		raise exception ''Update of backup_parts with backup_id % aborted, requested part_nr is % and calculated next is %'', b_id, my_part_nr, calc_part;
+	end if;
+	return null;
+end;
+' language plpgsql;
+
+create trigger do_backup_backup_parts_check
+	after insert or update or delete on backup_parts
+	for each row execute procedure backup_backup_parts_check();
+
+__END_OF_TRIGGER__
 
 	print "...\n";
 
@@ -458,7 +497,10 @@
 INSERT INTO files
 	VALUES (?,?,?,?,?,?,?)
 });
 
-foreach my $host_key (keys %{$hosts}) {
+my @hosts = keys %{$hosts};
+my $host_nr = 0;
+
+foreach my $host_key (@hosts) {
 
 	my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
 
@@ -473,13 +515,19 @@ foreach my $host_key (keys %{$hosts}) {
 		$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
 	}
 
-	print "host ".$hosts->{$host_key}->{'host'}.": ";
-
+	$host_nr++;
 	# get backups for a host
 	my @backups = $bpc->BackupInfoRead($hostname);
 	my $incs = scalar @backups;
 
-	print "$incs increments\n";
+	my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
+		$hosts->{$host_key}->{'host'},
+		$host_nr,
+		($#hosts + 1),
+		$incs
+	);
+	print $host_header unless ($opt{q});
+
 	my $inc_nr = 0;
 	$beenThere = {};
 
@@ -491,7 +539,7 @@ foreach my $host_key (keys %{$hosts}) {
 		my $backupNum = $backup->{'num'};
 		my @backupShares = ();
 
-		printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
+		my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
 			$hosts->{$host_key}->{'host'},
 			$inc_nr, $incs, $backupNum,
 			$backup->{type} || '?',
 			$backup->{nFilesNew} || '?', $backup->{nFiles} || '?',
 			strftime($t_fmt,localtime($backup->{startTime})),
 			fmt_time($backup->{endTime} - $backup->{startTime})
 		);
+		print $share_header unless ($opt{q});
 
 		my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
 		foreach my $share ($files->shareList($backupNum)) {
@@ -512,6 +561,15 @@ foreach my $host_key (keys %{$hosts}) {
 			# skip if already in database!
 			next if ($count > 0);
 
+			# dump host and share header for -q
+			if ($opt{q}) {
+				if ($host_header) {
+					print $host_header;
+					$host_header = undef;
+				}
+				print $share_header;
+			}
+
 			# dump some log
 			print curr_time," ", $share;
 
@@ -525,17 +583,22 @@ foreach my $host_key (keys %{$hosts}) {
 			my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
 
-			$sth->{update_backups_size}->execute(
-				$size,
-				$hostID,
-				$backupNum,
-				$backup->{'endTime'},
-				substr($backup->{'type'},0,4),
-				$shareID,
-			);
-
-			print " commit";
-			$dbh->commit();
+			eval {
+				$sth->{update_backups_size}->execute(
+					$size,
+					$hostID,
+					$backupNum,
+					$backup->{'endTime'},
+					substr($backup->{'type'},0,4),
+					$shareID,
+				);
+				print " commit";
+				$dbh->commit();
+			};
+			if ($@) {
+				print " rollback";
+				$dbh->rollback();
+			}
 
 			my $dur = (time() - $t) || 1;
 			printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
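The final hunk switches the per-share size update to the standard DBI eval/commit/rollback idiom, so one failed UPDATE (for example one vetoed by the backup_parts_check trigger above) no longer aborts the whole run. The same pattern in isolation, with placeholder connection values and statement:

	use strict;
	use warnings;
	use DBI;

	# placeholder DSN; BackupPC_updatedb takes this from $Conf{SearchDSN}
	my $dbh = DBI->connect('dbi:Pg:dbname=backuppc', 'hest', '',
		{ RaiseError => 1, AutoCommit => 0 });

	eval {
		# with RaiseError set, any failure dies out of the eval
		# before the commit is reached
		$dbh->do(q{ update backups set inc_size = ? where id = ? },
			undef, 12345, 1);
		$dbh->commit();
	};
	if ($@) {
		warn "rollback: $@";
		$dbh->rollback();
	}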