use Cwd qw/abs_path/;
use constant BPC_FTYPE_DIR => 5;
-use constant EST_CHUNK => 100000;
+use constant EST_CHUNK => 4096;
# daylight saving time change offset (1 hour)
my $dst_offset = 60 * 60;
my %opt;
-if ( !getopts("cdm:v:ijf", \%opt ) ) {
+if ( !getopts("cdm:v:ijfq", \%opt ) ) {
print STDERR <<EOF;
usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f]
-i update Hyper Estraier full text index
-j update full text, don't check existing files
-f don't do anything with full text index
+ -q be quiet for hosts without changes
Option -j is a variation on -i. It allows faster initial creation
of a full-text index from an existing database.
return strftime($t_fmt,localtime());
}
-my $hest_db;
my $hest_node;
-sub signal {
- my($sig) = @_;
- if ($hest_db) {
- print "\nCaught a SIG$sig--syncing database and shutting down\n";
- $hest_db->sync();
- $hest_db->close();
- }
- exit(0);
-}
-
-$SIG{'INT'} = \&signal;
-$SIG{'QUIT'} = \&signal;
-
sub hest_update {
my ($host_id, $share_id, $num) = @_;
my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initital import\n";
- unless (defined($index_node_url)) {
- print STDERR "HyperEstraier support not enabled in configuration\n";
+ unless ($index_node_url && $index_node_url =~ m#^http://#) {
+ print STDERR "HyperEstraier support not enabled or index node invalid\n" if ($debug);
$index_node_url = 0;
return;
}
my $offset = 0;
my $added = 0;
- print " opening index $index_node_url";
if ($index_node_url) {
- $hest_node ||= Search::Estraier::Node->new($index_node_url);
- $hest_node->set_auth('admin', 'admin');
+ print " opening index $index_node_url";
+ $hest_node ||= Search::Estraier::Node->new(
+ url => $index_node_url,
+ user => 'admin',
+ passwd => 'admin',
+ croak_on_error => 1,
+ );
print " via node URL";
- } else {
- die "don't know how to use Hyper Estraier Index $index_node_url";
}
my $results = 0;
if ($results == 0) {
print " - no new files\n";
- last;
+ return;
} else {
- print " - $results files: ";
+ print "...";
}
sub fmt_date {
while (my $row = $sth->fetchrow_hashref()) {
- my $fid = $row->{'fid'} || die "no fid?";
- my $uri = 'file:///' . $fid;
-
- unless ($skip_check) {
- my $id = ($hest_db || $hest_node)->uri_to_id($uri);
- next unless ($id == -1);
+ my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath};
+ if (! $skip_check && $hest_node) {
+ my $id = $hest_node->uri_to_id($uri);
+ next if ($id && $id == -1);
}
# create a document object
$doc->add_attr('@uri', $uri);
foreach my $c (@{ $sth->{NAME} }) {
+ print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
$doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
}
print STDERR $doc->dump_draft,"\n" if ($debug > 1);
# register the document object to the database
- if ($hest_node) {
- $hest_node->put_doc($doc);
- } else {
- die "not supported";
- }
+ $hest_node->put_doc($doc) if ($hest_node);
+
$added++;
}
- print " $added";
+ print "$added";
$offset += EST_CHUNK;
size bigint not null,
inc_size bigint not null default -1,
inc_deleted boolean default false,
- parts integer not null default 1,
+ parts integer not null default 0,
PRIMARY KEY(id)
);
files:size
archive:dvd_nr
archive_burned:archive_id
- backup_parts:backup_id,part_nr
+ backup_parts:backup_id,part_nr:unique
)) {
do_index($index);
}
$dbh->do( qq{ CREATE SEQUENCE $seq } );
}
+ print " creating triggers ";
+ $dbh->do( <<__END_OF_TRIGGER__ );
+
+-- Consistency guard: backups.parts must always equal the real number of
+-- rows in backup_parts for that backup.  Fired AFTER the statement so the
+-- row counts already reflect the change being checked.
+-- NOTE(review): the trigger below also fires on DELETE, but this function
+-- only handles INSERT/UPDATE; on DELETE b_id/b_parts stay null, so the
+-- comparison is null and no exception is raised -- presumably intentional,
+-- TODO confirm.
+create or replace function backup_parts_check() returns trigger as '
+declare
+ b_parts integer;
+ b_counted integer;
+ b_id integer;
+begin
+ -- raise notice ''old/new parts %/% backup_id %/%'', old.parts, new.parts, old.id, new.id;
+ if (TG_OP=''UPDATE'') then
+ b_id := new.id;
+ b_parts := new.parts;
+ elsif (TG_OP = ''INSERT'') then
+ b_id := new.id;
+ b_parts := new.parts;
+ end if;
+ -- actual number of part rows recorded for this backup
+ b_counted := (select count(*) from backup_parts where backup_id = b_id);
+ -- raise notice ''backup % parts %'', b_id, b_parts;
+ if ( b_parts != b_counted ) then
+ raise exception ''Update of backup % aborted, requested % parts and there are really % parts'', b_id, b_parts, b_counted;
+ end if;
+ return null;
+end;
+' language plpgsql;
+
+create trigger do_backup_parts_check
+ after insert or update or delete on backups
+ for each row execute procedure backup_parts_check();
+
+-- Keeps backups.parts in sync when part rows are added/removed, and
+-- enforces that part_nr values stay dense: after the change, part_nr of
+-- the affected row must equal the current row count for that backup
+-- (i.e. parts are inserted in order and only the last part may be
+-- deleted).  The embedded UPDATE on backups re-triggers
+-- backup_parts_check() above, which re-validates the count.
+create or replace function backup_backup_parts_check() returns trigger as '
+declare
+ b_id integer;
+ my_part_nr integer;
+ calc_part integer;
+begin
+ if (TG_OP = ''INSERT'') then
+ -- raise notice ''trigger: % backup_id %'', TG_OP, new.backup_id;
+ b_id = new.backup_id;
+ my_part_nr = new.part_nr;
+ execute ''update backups set parts = parts + 1 where id = '' || b_id;
+ elsif (TG_OP = ''DELETE'') then
+ -- raise notice ''trigger: % backup_id %'', TG_OP, old.backup_id;
+ b_id = old.backup_id;
+ my_part_nr = old.part_nr;
+ execute ''update backups set parts = parts - 1 where id = '' || b_id;
+ end if;
+ calc_part := (select count(part_nr) from backup_parts where backup_id = b_id);
+ if ( my_part_nr != calc_part ) then
+ raise exception ''Update of backup_parts with backup_id % aborted, requested part_nr is % and calculated next is %'', b_id, my_part_nr, calc_part;
+ end if;
+ return null;
+end;
+' language plpgsql;
+
+create trigger do_backup_backup_parts_check
+ after insert or update or delete on backup_parts
+ for each row execute procedure backup_backup_parts_check();
+
+__END_OF_TRIGGER__
print "...\n";
}
$host_nr++;
- print "host ", $hosts->{$host_key}->{'host'}, " [",
- $host_nr, "/", ($#hosts + 1), "]: ";
-
# get backups for a host
my @backups = $bpc->BackupInfoRead($hostname);
my $incs = scalar @backups;
- print "$incs increments\n";
+ my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
+ $hosts->{$host_key}->{'host'},
+ $host_nr,
+ ($#hosts + 1),
+ $incs
+ );
+ print $host_header unless ($opt{q});
+
my $inc_nr = 0;
$beenThere = {};
my $backupNum = $backup->{'num'};
my @backupShares = ();
- printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
+ my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
$hosts->{$host_key}->{'host'},
$inc_nr, $incs, $backupNum,
$backup->{type} || '?',
strftime($t_fmt,localtime($backup->{startTime})),
fmt_time($backup->{endTime} - $backup->{startTime})
);
+ print $share_header unless ($opt{q});
my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
foreach my $share ($files->shareList($backupNum)) {
# skip if already in database!
next if ($count > 0);
+ # dump host and share header for -q
+ if ($opt{q}) {
+ if ($host_header) {
+ print $host_header;
+ $host_header = undef;
+ }
+ print $share_header;
+ }
+
# dump some log
print curr_time," ", $share;