use File::Pid;
use POSIX qw/strftime/;
use BackupPC::SearchLib;
+use Cwd qw/abs_path/;
use constant BPC_FTYPE_DIR => 5;
-use constant EST_CHUNK => 100000;
+use constant EST_CHUNK => 4096;
+
+# tolerance (in seconds) for a one-hour daylight-saving-time shift in file mtimes
+my $dst_offset = 60 * 60;
my $debug = 0;
$|=1;
my $start_t = time();
-my $pidfile = new File::Pid;
+my $pid_path = abs_path($0);
+$pid_path =~ s/\W+/_/g;
+
+my $pidfile = new File::Pid({
+ file => "/tmp/$pid_path",
+});
if (my $pid = $pidfile->running ) {
die "$0 already running: $pid\n";
$pidfile->remove;
$pidfile = new File::Pid;
}
-$pidfile->write;
print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
+$pidfile->write;
my $t_fmt = '%Y-%m-%d %H:%M:%S';
my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
my $user = $Conf{SearchUser} || '';
-my $use_hest = $Conf{HyperEstraierIndex};
-my ($index_path, $index_node_url) = BackupPC::SearchLib::getHyperEstraier_url($use_hest);
+my $index_node_url = $Conf{HyperEstraierIndex};
my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
my %opt;
-if ( !getopts("cdm:v:ij", \%opt ) ) {
+if ( !getopts("cdm:v:ijfq", \%opt ) ) {
print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v|-v level] [-i]
+usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f]
Options:
-c create database on first use
-v num set verbosity (debug) level (default $debug)
-i update Hyper Estraier full text index
-j update full text, don't check existing files
+ -f don't do anything with full text index
+ -q be quiet for hosts without changes
Option -j is variation on -i. It will allow faster initial creation
of full-text index from existing database.
+Option -f will create database which is out of sync with full text index. You
+will have to re-run $0 with -i to fix it.
+
EOF
exit 1;
}
if ($opt{v}) {
print "Debug level at $opt{v}\n";
$debug = $opt{v};
+} elsif ($opt{f}) {
+ print "WARNING: disabling full-text index update. You need to re-run $0 -j !\n";
+ $index_node_url = undef;
}
#---- subs ----
return strftime($t_fmt,localtime());
}
-my $hest_db;
my $hest_node;
-sub signal {
- my($sig) = @_;
- if ($hest_db) {
- print "\nCaught a SIG$sig--syncing database and shutting down\n";
- $hest_db->sync();
- $hest_db->close();
- }
- exit(0);
-}
-
-$SIG{'INT'} = \&signal;
-$SIG{'QUIT'} = \&signal;
-
sub hest_update {
my ($host_id, $share_id, $num) = @_;
my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initital import\n";
- unless ($use_hest) {
- print STDERR "HyperEstraier support not enabled in configuration\n";
+ unless ($index_node_url && $index_node_url =~ m#^http://#) {
+ print STDERR "HyperEstraier support not enabled or index node invalid\n" if ($debug);
+ $index_node_url = 0;
return;
}
- print curr_time," updating HyperEstraier:";
+ print curr_time," updating Hyper Estraier:";
my $t = time();
my $offset = 0;
my $added = 0;
- print " opening index $use_hest";
- if ($index_path) {
- $hest_db = HyperEstraier::Database->new();
- $hest_db->open($TopDir . $index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
- print " directly";
- } elsif ($index_node_url) {
- $hest_node ||= HyperEstraier::Node->new($index_node_url);
- $hest_node->set_auth('admin', 'admin');
+ if ($index_node_url) {
+ print " opening index $index_node_url";
+ $hest_node ||= Search::Estraier::Node->new(
+ url => $index_node_url,
+ user => 'admin',
+ passwd => 'admin',
+ croak_on_error => 1,
+ );
print " via node URL";
- } else {
- die "don't know how to use HyperEstraier Index $use_hest";
}
- print " increment is " . EST_CHUNK . " files:";
my $results = 0;
if ($results == 0) {
print " - no new files\n";
- last;
+ return;
+ } else {
+ print "...";
}
sub fmt_date {
while (my $row = $sth->fetchrow_hashref()) {
- my $fid = $row->{'fid'} || die "no fid?";
- my $uri = 'file:///' . $fid;
-
- unless ($skip_check) {
- my $id = ($hest_db || $hest_node)->uri_to_id($uri);
- next unless ($id == -1);
+ my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath};
+ if (! $skip_check && $hest_node) {
+ my $id = $hest_node->uri_to_id($uri);
+ next if ($id && $id == -1);
}
# create a document object
- my $doc = HyperEstraier::Document->new;
+ my $doc = Search::Estraier::Document->new;
# add attributes to the document object
$doc->add_attr('@uri', $uri);
foreach my $c (@{ $sth->{NAME} }) {
- $doc->add_attr($c, $row->{$c}) if ($row->{$c});
+ print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
+ $doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
}
#$doc->add_attr('@cdate', fmt_date($row->{'date'}));
print STDERR $doc->dump_draft,"\n" if ($debug > 1);
# register the document object to the database
- if ($hest_db) {
- $hest_db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
- } elsif ($hest_node) {
- $hest_node->put_doc($doc);
- } else {
- die "not supported";
- }
+ $hest_node->put_doc($doc) if ($hest_node);
+
$added++;
}
- print " $added";
- $hest_db->sync() if ($index_path);
+ print "$added";
$offset += EST_CHUNK;
} while ($results == EST_CHUNK);
- if ($index_path) {
- print ", close";
- $hest_db->close();
- }
-
my $dur = (time() - $t) || 1;
printf(" [%.2f/s dur: %s]\n",
( $added / $dur ),
## update index ##
-if (($opt{i} || $opt{j} || ($index_path && ! -e $index_path)) && !$opt{c}) {
+if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
# update all
- print "force update of HyperEstraier index ";
- print "importing existing data" unless (-e $index_path);
+ print "force update of Hyper Estraier index ";
print "by -i flag" if ($opt{i});
print "by -j flag" if ($opt{j});
print "\n";
size bigint not null,
inc_size bigint not null default -1,
inc_deleted boolean default false,
- parts integer not null default 1,
+ parts integer not null default 0,
PRIMARY KEY(id)
);
);
create table archive_burned (
- archive_id int references archive(id),
- date date default now(),
+ archive_id int references archive(id),
+ date timestamp default now(),
+ part int not null default 1,
+ copy int not null default 1,
iso_size bigint default -1
);
+ create table backup_parts (
+ id serial,
+ backup_id int references backups(id),
+ part_nr int not null check (part_nr > 0),
+ tar_size bigint not null check (tar_size > 0),
+ size bigint not null check (size > 0),
+ md5 text not null,
+ items int not null check (items > 0),
+ date timestamp default now(),
+ primary key(id)
+ );
});
print "creating indexes: ";
files:size
archive:dvd_nr
archive_burned:archive_id
+ backup_parts:backup_id,part_nr
)) {
do_index($index);
}
$dbh->do( qq{ CREATE SEQUENCE $seq } );
}
+ print " creating triggers ";
+ $dbh->do( qq{
+ create or replace function backup_parts_check() returns trigger as '
+ declare
+ b_parts integer;
+ b_counted integer;
+ b_id integer;
+ begin
+ if (TG_OP=''UPDATE'') then
+ b_id := old.id;
+ b_parts := old.parts;
+ elsif (TG_OP = ''INSERT'') then
+ b_id := new.id;
+ b_parts := new.parts;
+ end if;
+ b_counted := (select count(*) from backup_parts where backup_id = b_id);
+ if ( b_parts != b_counted ) then
+ raise exception ''Update of backup % aborted, requested % parts and there are really % parts'', b_id, b_parts, b_counted;
+ end if;
+ return null;
+ end;
+ ' language plpgsql;
+
+ create trigger do_backup_parts_check
+ after insert or update or delete on backups
+ for each row execute procedure backup_parts_check();
+ });
print "...\n";
$sth->{insert_backups} = $dbh->prepare(qq{
INSERT INTO backups (hostID, num, date, type, shareid, size)
-VALUES (?,?,?,?,?,?)
+VALUES (?,?,?,?,?,-1)
+});
+
+$sth->{update_backups_size} = $dbh->prepare(qq{
+UPDATE backups SET size = ?
+WHERE hostID = ? and num = ? and date = ? and type =? and shareid = ?
});
$sth->{insert_files} = $dbh->prepare(qq{
VALUES (?,?,?,?,?,?,?)
});
-foreach my $host_key (keys %{$hosts}) {
+my @hosts = keys %{$hosts};
+my $host_nr = 0;
+
+foreach my $host_key (@hosts) {
my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
}
- print "host ".$hosts->{$host_key}->{'host'}.": ";
-
+ $host_nr++;
# get backups for a host
my @backups = $bpc->BackupInfoRead($hostname);
my $incs = scalar @backups;
- print "$incs increments\n";
+ my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
+ $hosts->{$host_key}->{'host'},
+ $host_nr,
+ ($#hosts + 1),
+ $incs
+ );
+ print $host_header unless ($opt{q});
+
my $inc_nr = 0;
$beenThere = {};
my $backupNum = $backup->{'num'};
my @backupShares = ();
- printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
+ my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
$hosts->{$host_key}->{'host'},
$inc_nr, $incs, $backupNum,
$backup->{type} || '?',
strftime($t_fmt,localtime($backup->{startTime})),
fmt_time($backup->{endTime} - $backup->{startTime})
);
+ print $share_header unless ($opt{q});
my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
foreach my $share ($files->shareList($backupNum)) {
# skip if allready in database!
next if ($count > 0);
+		# with -q the host/share headers were deferred; print them now that this share has changes
+ if ($opt{q}) {
+ if ($host_header) {
+ print $host_header;
+ $host_header = undef;
+ }
+ print $share_header;
+ }
+
# dump some log
print curr_time," ", $share;
- my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
-
$sth->{insert_backups}->execute(
$hostID,
$backupNum,
$backup->{'endTime'},
substr($backup->{'type'},0,4),
$shareID,
- $size,
);
- print " commit";
- $dbh->commit();
+ my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+
+ eval {
+ $sth->{update_backups_size}->execute(
+ $size,
+ $hostID,
+ $backupNum,
+ $backup->{'endTime'},
+ substr($backup->{'type'},0,4),
+ $shareID,
+ );
+ print " commit";
+ $dbh->commit();
+ };
+ if ($@) {
+ print " rollback";
+ $dbh->rollback();
+ }
my $dur = (time() - $t) || 1;
printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
SELECT 1 FROM files
WHERE shareID = ? and
path = ? and
- date = ? and
- size = ?
+ size = ? and
+ ( date = ? or date = ? or date = ? )
LIMIT 1
});
- my @param = ($shareID,$path,$date,$size);
+ my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
$sth->{file_in_db}->execute(@param);
my $rows = $sth->{file_in_db}->rows;
print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
$filesInBackup->{$path_key}->{'size'}
));
+ my $key_dst_prev = join(" ", (
+ $shareID,
+ $dir,
+ $path_key,
+ $filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
+ $filesInBackup->{$path_key}->{'size'}
+ ));
+
+ my $key_dst_next = join(" ", (
+ $shareID,
+ $dir,
+ $path_key,
+ $filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
+ $filesInBackup->{$path_key}->{'size'}
+ ));
+
my $found;
- if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
+ if (
+ ! defined($beenThere->{$key}) &&
+ ! defined($beenThere->{$key_dst_prev}) &&
+ ! defined($beenThere->{$key_dst_next}) &&
+ ! ($found = found_in_db($key, @data))
+ ) {
print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);
if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {