use Time::HiRes qw/time/;
use File::Pid;
use POSIX qw/strftime/;
+use BackupPC::SearchLib;
+use Cwd qw/abs_path/;
use constant BPC_FTYPE_DIR => 5;
+use constant EST_CHUNK => 4096;
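+# number of file rows fetched (and indexed) per batch in hest_update()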
+
+# daylight saving time offset of 1 hour, used to match file mtimes across DST changes
+my $dst_offset = 60 * 60;
my $debug = 0;
$|=1;
my $start_t = time();
-my $pidfile = new File::Pid;
+my $pid_path = abs_path($0);
+$pid_path =~ s/\W+/_/g;
+
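+# derive a pidfile name in /tmp from the script path so only one instance runs at a time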
+my $pidfile = new File::Pid({
+ file => "/tmp/$pid_path",
+});
if (my $pid = $pidfile->running ) {
die "$0 already running: $pid\n";
$pidfile->remove;
$pidfile = new File::Pid;
}
-$pidfile->write;
print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
+$pidfile->write;
my $t_fmt = '%Y-%m-%d %H:%M:%S';
my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
my $user = $Conf{SearchUser} || '';
-my $index_path = $Conf{HyperEstraierIndex};
+
+my $index_node_url = $Conf{HyperEstraierIndex};
my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
my %opt;
-if ( !getopts("cdm:v:i", \%opt ) ) {
+if ( !getopts("cdm:v:ijfq", \%opt ) ) {
print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v|-v level] [-i]
+usage: $0 [-c|-d] [-m num] [-v level] [-i|-j|-f] [-q]
Options:
-c create database on first use
-d delete database before import
-m num import just num increments for one host
-v num set verbosity (debug) level (default $debug)
- -i update HyperEstraier full text index
+ -i update Hyper Estraier full text index
+	-j	update full text index, don't check existing files
+ -f don't do anything with full text index
+ -q be quiet for hosts without changes
+
+Option -j is a variation on -i. It allows faster initial creation
+of the full-text index from an existing database.
+
+Option -f will create a database which is out of sync with the full-text
+index. You will have to re-run $0 with -i to fix it.
+
EOF
exit 1;
}
if ($opt{v}) {
print "Debug level at $opt{v}\n";
$debug = $opt{v};
+}
+if ($opt{f}) {
+	print "WARNING: disabling full-text index update. You will need to re-run $0 with -i to fix it!\n";
+ $index_node_url = undef;
}
#---- subs ----
return strftime($t_fmt,localtime());
}
-#---- /subs ----
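+# cached Search::Estraier node connection, opened on first use in hest_update()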
+my $hest_node;
-## update index ##
-if ($opt{i}) {
+sub hest_update {
- print curr_time," updating HyperEstraier: files";
+ my ($host_id, $share_id, $num) = @_;
- my $t = time();
-
- my $sth = $dbh->prepare(qq{
- SELECT
- files.id AS fid,
- hosts.name AS hname,
- shares.name AS sname,
- -- shares.share AS sharename,
- files.backupnum AS backupnum,
- -- files.name AS filename,
- files.path AS filepath,
- files.date AS date,
- files.type AS filetype,
- files.size AS size,
- files.shareid AS shareid,
- backups.date AS backup_date
- FROM files
- INNER JOIN shares ON files.shareID=shares.ID
- INNER JOIN hosts ON hosts.ID = shares.hostID
- INNER JOIN backups ON backups.num = files.backupNum and backups.hostID = hosts.ID AND backups.shareID = shares.ID
- });
+	my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";
+
+ unless ($index_node_url && $index_node_url =~ m#^http://#) {
+ print STDERR "HyperEstraier support not enabled or index node invalid\n" if ($debug);
+ $index_node_url = 0;
+ return;
+ }
- $sth->execute();
- my $results = $sth->rows;
+ print curr_time," updating Hyper Estraier:";
- my $dot = int($results / 15);
+ my $t = time();
- print " $results ($dot/#)";
+ my $offset = 0;
+ my $added = 0;
- sub fmt_date {
- my $t = shift || return;
- my $iso = BackupPC::Lib::timeStamp($t);
- $iso =~ s/\s/T/;
- return $iso;
+ if ($index_node_url) {
+ print " opening index $index_node_url";
+ $hest_node ||= Search::Estraier::Node->new(
+ url => $index_node_url,
+ user => 'admin',
+ passwd => 'admin',
+ croak_on_error => 1,
+ );
+ print " via node URL";
}
- my $i = 0;
- my $max = int($results / $dot);
+ my $results = 0;
- $index_path = $TopDir . '/' . $index_path;
- $index_path =~ s#//#/#g;
+ do {
- print " index $index_path...";
- use HyperEstraier;
- my $db = HyperEstraier::Database->new();
- $db->open($index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
+ my $where = '';
+ my @data;
+ if (defined($host_id) && defined($share_id) && defined($num)) {
+ $where = qq{
+ WHERE
+ hosts.id = ? AND
+ shares.id = ? AND
+ files.backupnum = ?
+ };
+ @data = ( $host_id, $share_id, $num );
+ }
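+		# page through the result set in EST_CHUNK-sized batches (LIMIT/OFFSET) so memory use stays bounded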
+ my $limit = sprintf('LIMIT '.EST_CHUNK.' OFFSET %d', $offset);
+
+ my $sth = $dbh->prepare(qq{
+ SELECT
+ files.id AS fid,
+ hosts.name AS hname,
+ shares.name AS sname,
+ -- shares.share AS sharename,
+ files.backupnum AS backupnum,
+ -- files.name AS filename,
+ files.path AS filepath,
+ files.date AS date,
+ files.type AS type,
+ files.size AS size,
+ files.shareid AS shareid,
+ backups.date AS backup_date
+ FROM files
+ INNER JOIN shares ON files.shareID=shares.ID
+ INNER JOIN hosts ON hosts.ID = shares.hostID
+ INNER JOIN backups ON backups.num = files.backupNum and backups.hostID = hosts.ID AND backups.shareID = shares.ID
+ $where
+ $limit
+ });
+
+ $sth->execute(@data);
+ $results = $sth->rows;
+
+ if ($results == 0) {
+ print " - no new files\n";
+ return;
+ } else {
+ print "...";
+ }
- while (my $row = $sth->fetchrow_hashref()) {
+ sub fmt_date {
+ my $t = shift || return;
+ my $iso = BackupPC::Lib::timeStamp($t);
+ $iso =~ s/\s/T/;
+ return $iso;
+ }
- # create a document object
- my $doc = HyperEstraier::Document->new;
+ while (my $row = $sth->fetchrow_hashref()) {
- # add attributes to the document object
- $doc->add_attr('@uri', 'file:///' . $row->{'fid'});
+ my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath};
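+			# unless -j was given, look the URI up in the index to avoid re-adding existing files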
+ if (! $skip_check && $hest_node) {
+ my $id = $hest_node->uri_to_id($uri);
+ next if ($id && $id == -1);
+ }
- foreach my $c (@{ $sth->{NAME} }) {
- $doc->add_attr($c, $row->{$c}) if ($row->{$c});
- }
+ # create a document object
+ my $doc = Search::Estraier::Document->new;
- #$doc->add_attr('@cdate', fmt_date($row->{'date'}));
+ # add attributes to the document object
+ $doc->add_attr('@uri', $uri);
- # add the body text to the document object
- my $path = $row->{'filepath'};
- $doc->add_text($path);
- $path =~ s/(.)/$1 /g;
- $doc->add_hidden_text($path);
+ foreach my $c (@{ $sth->{NAME} }) {
+ print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
+ $doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
+ }
+
+ #$doc->add_attr('@cdate', fmt_date($row->{'date'}));
+
+ # add the body text to the document object
+ my $path = $row->{'filepath'};
+ $doc->add_text($path);
+ $path =~ s/(.)/$1 /g;
+ $doc->add_hidden_text($path);
- print STDERR $doc->dump_draft,"\n" if ($debug > 1);
+ print STDERR $doc->dump_draft,"\n" if ($debug > 1);
- # register the document object to the database
- $db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
+ # register the document object to the database
+ $hest_node->put_doc($doc) if ($hest_node);
- $i++;
- if ($i % $dot == 0) {
- print "$max ";
- $max--;
+ $added++;
}
- }
+ print "$added";
+
+ $offset += EST_CHUNK;
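+		# a full chunk means more rows may remain; a short chunk was the last page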
- print "sync";
- $db->sync();
- print " close";
- $db->close();
+ } while ($results == EST_CHUNK);
my $dur = (time() - $t) || 1;
printf(" [%.2f/s dur: %s]\n",
- ( $results / $dur ),
+ ( $added / $dur ),
fmt_time($dur)
);
-
- exit;
}
-###################################create tables############################3
+#---- /subs ----
+
+
+## update index ##
+if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
+	# a full index update was requested on the command line
+	print "forced update of Hyper Estraier index ";
+ print "by -i flag" if ($opt{i});
+ print "by -j flag" if ($opt{j});
+ print "\n";
+ hest_update();
+}
+## create tables ##
if ($opt{c}) {
sub do_index {
my $index = shift || return;
- my ($table,$col,$unique) = split(/_/, $index);
+ my ($table,$col,$unique) = split(/:/, $index);
$unique ||= '';
- $index =~ s/,/_/g;
+ $index =~ s/\W+/_/g;
+ print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
$dbh->do(qq{ create $unique index $index on $table($col) });
}
print "creating tables...\n";
-
- $dbh->do(qq{
+
+ $dbh->do( qq{
create table hosts (
ID SERIAL PRIMARY KEY,
name VARCHAR(30) NOT NULL,
IP VARCHAR(15)
);
- });
-
- $dbh->do(qq{
+
create table shares (
ID SERIAL PRIMARY KEY,
hostID INTEGER NOT NULL references hosts(id),
name VARCHAR(30) NOT NULL,
- share VARCHAR(200) NOT NULL,
- localpath VARCHAR(200)
+ share VARCHAR(200) NOT NULL
);
- });
-
- $dbh->do(qq{
+
+ create table dvds (
+ ID SERIAL PRIMARY KEY,
+ num INTEGER NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ mjesto VARCHAR(255)
+ );
+
create table backups (
+ id serial,
hostID INTEGER NOT NULL references hosts(id),
num INTEGER NOT NULL,
date integer NOT NULL,
type CHAR(4) not null,
shareID integer not null references shares(id),
- size integer not null,
- PRIMARY KEY(hostID, num, shareID)
+ size bigint not null,
+ inc_size bigint not null default -1,
+ inc_deleted boolean default false,
+ parts integer not null default 0,
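+		-- parts is kept in sync with backup_parts rows by the triggers created below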
+ PRIMARY KEY(id)
);
- });
- #do_index('backups_hostid,num_unique');
+ create table files (
+ ID SERIAL,
+ shareID INTEGER NOT NULL references shares(id),
+ backupNum INTEGER NOT NULL,
+ name VARCHAR(255) NOT NULL,
+ path VARCHAR(255) NOT NULL,
+ date integer NOT NULL,
+ type INTEGER NOT NULL,
+ size bigint NOT NULL,
+ primary key(id)
+ );
- $dbh->do(qq{
- create table dvds (
- ID SERIAL PRIMARY KEY,
- num INTEGER NOT NULL,
- name VARCHAR(255) NOT NULL,
- mjesto VARCHAR(255)
+ create table archive (
+ id serial,
+ dvd_nr int not null,
+ total_size bigint default -1,
+ note text,
+ username varchar(20) not null,
+ date timestamp default now(),
+ primary key(id)
+ );
+
+ create table archive_backup (
+ archive_id int not null references archive(id) on delete cascade,
+ backup_id int not null references backups(id),
+ primary key(archive_id, backup_id)
);
- });
- $dbh->do(qq{
- create table files (
- ID SERIAL PRIMARY KEY,
- shareID INTEGER NOT NULL references shares(id),
- backupNum INTEGER NOT NULL,
- name VARCHAR(255) NOT NULL,
- path VARCHAR(255) NOT NULL,
- date integer NOT NULL,
- type INTEGER NOT NULL,
- size INTEGER NOT NULL,
- dvdid INTEGER references dvds(id)
+ create table archive_burned (
+ archive_id int references archive(id),
+ date timestamp default now(),
+ part int not null default 1,
+ copy int not null default 1,
+ iso_size bigint default -1
+ );
+
+ create table backup_parts (
+ id serial,
+ backup_id int references backups(id),
+ part_nr int not null check (part_nr > 0),
+ tar_size bigint not null check (tar_size > 0),
+ size bigint not null check (size > 0),
+ md5 text not null,
+ items int not null check (items > 0),
+ date timestamp default now(),
+ primary key(id)
);
});
- print "creating indexes:";
+ print "creating indexes: ";
foreach my $index (qw(
- hosts_name
- backups_hostID
- backups_num
- shares_hostID
- shares_name
- files_shareID
- files_path
- files_name
- files_date
- files_size
+ hosts:name
+ backups:hostID
+ backups:num
+ backups:shareID
+ shares:hostID
+ shares:name
+ files:shareID
+ files:path
+ files:name
+ files:date
+ files:size
+ archive:dvd_nr
+ archive_burned:archive_id
+ backup_parts:backup_id,part_nr:unique
)) {
- print " $index";
do_index($index);
}
+
+ print " creating sequence: ";
+ foreach my $seq (qw/dvd_nr/) {
+ print "$seq ";
+ $dbh->do( qq{ CREATE SEQUENCE $seq } );
+ }
+
+ print " creating triggers ";
+ $dbh->do( <<__END_OF_TRIGGER__ );
+
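+-- reject INSERT/UPDATE on backups when the parts column doesn't match the actual number of backup_parts rows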
+create or replace function backup_parts_check() returns trigger as '
+declare
+ b_parts integer;
+ b_counted integer;
+ b_id integer;
+begin
+ -- raise notice ''old/new parts %/% backup_id %/%'', old.parts, new.parts, old.id, new.id;
+ if (TG_OP=''UPDATE'') then
+ b_id := new.id;
+ b_parts := new.parts;
+ elsif (TG_OP = ''INSERT'') then
+ b_id := new.id;
+ b_parts := new.parts;
+ end if;
+ b_counted := (select count(*) from backup_parts where backup_id = b_id);
+ -- raise notice ''backup % parts %'', b_id, b_parts;
+ if ( b_parts != b_counted ) then
+		raise exception ''Update of backup % aborted: requested % parts but found % parts'', b_id, b_parts, b_counted;
+ end if;
+ return null;
+end;
+' language plpgsql;
+
+create trigger do_backup_parts_check
+ after insert or update or delete on backups
+ for each row execute procedure backup_parts_check();
+
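+-- keep backups.parts up to date as backup_parts rows are added or removed, and verify part numbers stay sequential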
+create or replace function backup_backup_parts_check() returns trigger as '
+declare
+ b_id integer;
+ my_part_nr integer;
+ calc_part integer;
+begin
+ if (TG_OP = ''INSERT'') then
+ -- raise notice ''trigger: % backup_id %'', TG_OP, new.backup_id;
+ b_id = new.backup_id;
+ my_part_nr = new.part_nr;
+ execute ''update backups set parts = parts + 1 where id = '' || b_id;
+ elsif (TG_OP = ''DELETE'') then
+ -- raise notice ''trigger: % backup_id %'', TG_OP, old.backup_id;
+ b_id = old.backup_id;
+ my_part_nr = old.part_nr;
+ execute ''update backups set parts = parts - 1 where id = '' || b_id;
+ end if;
+ calc_part := (select count(part_nr) from backup_parts where backup_id = b_id);
+ if ( my_part_nr != calc_part ) then
+		raise exception ''Update of backup_parts with backup_id % aborted, requested part_nr is % and calculated next is %'', b_id, my_part_nr, calc_part;
+ end if;
+ return null;
+end;
+' language plpgsql;
+
+create trigger do_backup_backup_parts_check
+ after insert or update or delete on backup_parts
+ for each row execute procedure backup_backup_parts_check();
+
+__END_OF_TRIGGER__
+
print "...\n";
$dbh->commit;
}
+## delete data before inserting ##
if ($opt{d}) {
print "deleting ";
foreach my $table (qw(files dvds backups shares hosts)) {
$dbh->commit;
}
-#################################INSERT VALUES#############################
+## insert new values ##
# get hosts
$hosts = $bpc->HostInfoRead();
$sth->{insert_backups} = $dbh->prepare(qq{
INSERT INTO backups (hostID, num, date, type, shareid, size)
-VALUES (?,?,?,?,?,?)
+VALUES (?,?,?,?,?,-1)
+});
+
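+# size is first inserted as -1 (see insert_backups) and updated with the real value once the share has been recursed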
+$sth->{update_backups_size} = $dbh->prepare(qq{
+UPDATE backups SET size = ?
+WHERE hostID = ? and num = ? and date = ? and type =? and shareid = ?
});
$sth->{insert_files} = $dbh->prepare(qq{
VALUES (?,?,?,?,?,?,?)
});
-foreach my $host_key (keys %{$hosts}) {
+my @hosts = keys %{$hosts};
+my $host_nr = 0;
+
+foreach my $host_key (@hosts) {
my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
}
- print "host ".$hosts->{$host_key}->{'host'}.": ";
-
+ $host_nr++;
# get backups for a host
my @backups = $bpc->BackupInfoRead($hostname);
my $incs = scalar @backups;
- print "$incs increments\n";
+ my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
+ $hosts->{$host_key}->{'host'},
+ $host_nr,
+ ($#hosts + 1),
+ $incs
+ );
+ print $host_header unless ($opt{q});
+
my $inc_nr = 0;
$beenThere = {};
my $backupNum = $backup->{'num'};
my @backupShares = ();
- printf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
+ my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n",
$hosts->{$host_key}->{'host'},
$inc_nr, $incs, $backupNum,
$backup->{type} || '?',
strftime($t_fmt,localtime($backup->{startTime})),
fmt_time($backup->{endTime} - $backup->{startTime})
);
+ print $share_header unless ($opt{q});
my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
foreach my $share ($files->shareList($backupNum)) {
# skip if already in database!
next if ($count > 0);
+		# with -q, print host and share headers only when the share has changes
+ if ($opt{q}) {
+ if ($host_header) {
+ print $host_header;
+ $host_header = undef;
+ }
+ print $share_header;
+ }
+
# dump some log
print curr_time," ", $share;
- my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
-
$sth->{insert_backups}->execute(
$hostID,
$backupNum,
$backup->{'endTime'},
- $backup->{'type'},
+ substr($backup->{'type'},0,4),
$shareID,
- $size,
);
- print " commit";
- $dbh->commit();
+ my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+
+ eval {
+ $sth->{update_backups_size}->execute(
+ $size,
+ $hostID,
+ $backupNum,
+ $backup->{'endTime'},
+ substr($backup->{'type'},0,4),
+ $shareID,
+ );
+ print " commit";
+ $dbh->commit();
+ };
+ if ($@) {
+ print " rollback";
+ $dbh->rollback();
+ }
my $dur = (time() - $t) || 1;
printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
( ($f+$d) / $dur ),
fmt_time($dur)
);
+
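+		# update the full-text index only for this backup, and only if it added files or dirs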
+ hest_update($hostID, $shareID, $backupNum) if ($nf + $nd > 0);
}
}
$sth->{insert_share} ||= $dbh->prepare(qq{
INSERT INTO shares
- (hostID,name,share,localpath)
- VALUES (?,?,?,?)
+ (hostID,name,share)
+ VALUES (?,?,?)
});
my $drop_down = $hostname . '/' . $share;
$drop_down =~ s#//+#/#g;
- $sth->{insert_share}->execute($hostID,$share, $drop_down ,undef);
+ $sth->{insert_share}->execute($hostID,$share, $drop_down);
return $dbh->last_insert_id(undef,undef,'shares',undef);
}
SELECT 1 FROM files
WHERE shareID = ? and
path = ? and
- date = ? and
- size = ?
+ size = ? and
+ ( date = ? or date = ? or date = ? )
LIMIT 1
});
- my @param = ($shareID,$path,$date,$size);
+ my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
$sth->{file_in_db}->execute(@param);
my $rows = $sth->{file_in_db}->rows;
print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
$filesInBackup->{$path_key}->{'size'}
));
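+	# two extra lookup keys with mtime shifted one hour either way, so backups spanning a DST change don't duplicate files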
+ my $key_dst_prev = join(" ", (
+ $shareID,
+ $dir,
+ $path_key,
+ $filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
+ $filesInBackup->{$path_key}->{'size'}
+ ));
+
+ my $key_dst_next = join(" ", (
+ $shareID,
+ $dir,
+ $path_key,
+ $filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
+ $filesInBackup->{$path_key}->{'size'}
+ ));
+
my $found;
- if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
+ if (
+ ! defined($beenThere->{$key}) &&
+ ! defined($beenThere->{$key_dst_prev}) &&
+ ! defined($beenThere->{$key_dst_next}) &&
+ ! ($found = found_in_db($key, @data))
+ ) {
print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);
if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {