X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=bin%2FBackupPC_updatedb;h=8fd5dd2575c4c4f8619fdc242f436e561fff4b29;hb=58a68f96114439f61c870061f2d2a6185e90121e;hp=861e49508743717111099fceec5eeab58c30cd2e;hpb=eb1b724fe866250a04a2f5824bf541b1730c2315;p=BackupPC.git

diff --git a/bin/BackupPC_updatedb b/bin/BackupPC_updatedb
index 861e495..8fd5dd2 100755
--- a/bin/BackupPC_updatedb
+++ b/bin/BackupPC_updatedb
@@ -11,8 +11,13 @@ use Getopt::Std;
 use Time::HiRes qw/time/;
 use File::Pid;
 use POSIX qw/strftime/;
+use BackupPC::SearchLib;
 
 use constant BPC_FTYPE_DIR => 5;
+use constant EST_CHUNK => 100000;
+
+# one-hour offset used to tolerate daylight saving time changes
+my $dst_offset = 60 * 60;
 
 my $debug = 0;
 $|=1;
@@ -40,22 +45,33 @@ my $beenThere = {};
 my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
 my $user = $Conf{SearchUser} || '';
 
-my $index_path = $Conf{HyperEstraierIndex};
+
+my $use_hest = $Conf{HyperEstraierIndex};
+my ($index_path, $index_node_url) = BackupPC::SearchLib::getHyperEstraier_url($use_hest);
 
 my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
 
 my %opt;
 
-if ( !getopts("cdm:v:i", \%opt ) ) {
+if ( !getopts("cdm:v:ijf", \%opt ) ) {
 	print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v|-v level] [-i]
+usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f]
 
 Options:
 	-c	create database on first use
 	-d	delete database before import
 	-m num	import just num increments for one host
 	-v num	set verbosity (debug) level (default $debug)
 	-i	update Hyper Estraier full text index
+	-j	update full text index without checking for existing files
+	-f	don't do anything with the full text index
 EOF
 	exit 1;
 }
 
+my $hest_db;
+my $hest_node;
+
+sub signal {
+	my($sig) = @_;
+	if ($hest_db) {
+		print "\nCaught a SIG$sig -- syncing database and shutting down\n";
+		$hest_db->sync();
+		$hest_db->close();
+	}
+	exit(0);
+}
 
-	my $t = time();
-	
-	my $sth = $dbh->prepare(qq{
-		SELECT
-			files.id		AS fid,
-			hosts.name		AS hname,
-			shares.name		AS sname,
-			-- shares.share		AS sharename,
-			files.backupnum		AS backupnum,
-			-- files.name		AS filename,
-			files.path		AS filepath,
-			files.date		AS date,
-			files.type		AS filetype,
-			files.size		AS size,
-			files.shareid		AS shareid,
-			backups.date		AS backup_date
-		FROM files
-			INNER JOIN shares	ON files.shareID=shares.ID
-			INNER JOIN hosts	ON hosts.ID = shares.hostID
-			INNER JOIN backups	ON backups.num = files.backupNum and backups.hostID = hosts.ID AND backups.shareID = shares.ID
-	});
+$SIG{'INT'}  = \&signal;
+$SIG{'QUIT'} = \&signal;
 
-	$sth->execute();
-	my $results = $sth->rows;
+sub hest_update {
 
-	my $dot = int($results / 15);
+	my ($host_id, $share_id, $num) = @_;
 
-	print " $results ($dot/#)";
+	my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";
 
-	sub fmt_date {
-		my $t = shift || return;
-		my $iso = BackupPC::Lib::timeStamp($t);
-		$iso =~ s/\s/T/;
-		return $iso;
+	unless (defined($use_hest)) {
+		print STDERR "HyperEstraier support not enabled in configuration\n";
+		$use_hest = 0;
+		return;
 	}
 
-	my $i = 0;
-	my $max = int($results / $dot);
+	print curr_time," updating HyperEstraier:";
 
-	$index_path = $TopDir . '/' . $index_path;
-	$index_path =~ s#//#/#g;
+	my $t = time();
 
-	print " index $index_path...";
-	use HyperEstraier;
-	my $db = HyperEstraier::Database->new();
-	$db->open($index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
+	my $offset = 0;
+	my $added = 0;
+
+	print " opening index $use_hest";
+	if ($index_path) {
+		$hest_db = HyperEstraier::Database->new();
+		$hest_db->open($TopDir . $index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
+		print " directly";
+	} elsif ($index_node_url) {
+		$hest_node ||= HyperEstraier::Node->new($index_node_url);
+		$hest_node->set_auth('admin', 'admin');
+		print " via node URL";
+	} else {
+		die "don't know how to use HyperEstraier Index $use_hest";
+	}
+	print " increment is " . EST_CHUNK . " files:";
+
+	my $results = 0;
+
+	do {
+
+		my $where = '';
+		my @data;
+		if (defined($host_id) && defined($share_id) && defined($num)) {
+			$where = qq{
+			WHERE
+				hosts.id = ? AND
+				shares.id = ? AND
+				files.backupnum = ?
+			};
+			@data = ( $host_id, $share_id, $num );
+		}
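+
+		# fetch the catalog in EST_CHUNK-sized slices via LIMIT/OFFSET;
+		# the enclosing do/while loop repeats until a slice comes back
+		# with fewer than EST_CHUNK rows, so the whole files table never
+		# has to be held in memory at once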
+		my $limit = sprintf('LIMIT '.EST_CHUNK.' OFFSET %d', $offset);
+
+		my $sth = $dbh->prepare(qq{
+			SELECT
+				files.id		AS fid,
+				hosts.name		AS hname,
+				shares.name		AS sname,
+				-- shares.share		AS sharename,
+				files.backupnum		AS backupnum,
+				-- files.name		AS filename,
+				files.path		AS filepath,
+				files.date		AS date,
+				files.type		AS type,
+				files.size		AS size,
+				files.shareid		AS shareid,
+				backups.date		AS backup_date
+			FROM files
+				INNER JOIN shares	ON files.shareID=shares.ID
+				INNER JOIN hosts	ON hosts.ID = shares.hostID
+				INNER JOIN backups	ON backups.num = files.backupNum and backups.hostID = hosts.ID AND backups.shareID = shares.ID
+			$where
+			$limit
+		});
+
+		$sth->execute(@data);
+		$results = $sth->rows;
+
+		if ($results == 0) {
+			print " - no new files\n";
+			last;
+		}
 
-	while (my $row = $sth->fetchrow_hashref()) {
+		sub fmt_date {
+			my $t = shift || return;
+			my $iso = BackupPC::Lib::timeStamp($t);
+			$iso =~ s/\s/T/;
+			return $iso;
+		}
 
-		# create a document object
-		my $doc = HyperEstraier::Document->new;
+		while (my $row = $sth->fetchrow_hashref()) {
 
-		# add attributes to the document object
-		$doc->add_attr('@uri', 'file:///' . $row->{'fid'});
+			my $fid = $row->{'fid'} || die "no fid?";
+			my $uri = 'file:///' . $fid;
 
-		foreach my $c (@{ $sth->{NAME} }) {
-			$doc->add_attr($c, $row->{$c}) if ($row->{$c});
-		}
+			unless ($skip_check) {
+				my $id = ($hest_db || $hest_node)->uri_to_id($uri);
+				next unless ($id == -1);
+			}
+
+			# create a document object
+			my $doc = HyperEstraier::Document->new;
 
-		#$doc->add_attr('@cdate', fmt_date($row->{'date'}));
+			# add attributes to the document object
+			$doc->add_attr('@uri', $uri);
+
+			foreach my $c (@{ $sth->{NAME} }) {
+				$doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
+			}
 
-		# add the body text to the document object
-		my $path = $row->{'filepath'};
-		$doc->add_text($path);
-		$path =~ s/(.)/$1 /g;
-		$doc->add_hidden_text($path);
+			#$doc->add_attr('@cdate', fmt_date($row->{'date'}));
 
-		print STDERR $doc->dump_draft,"\n" if ($debug > 1);
+			# add the body text to the document object
+			my $path = $row->{'filepath'};
+			$doc->add_text($path);
+			$path =~ s/(.)/$1 /g;
+			$doc->add_hidden_text($path);
 
-		# register the document object to the database
-		$db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
+			print STDERR $doc->dump_draft,"\n" if ($debug > 1);
 
-		$i++;
-		if ($i % $dot == 0) {
-			print "$max ";
-			$max--;
+			# register the document object to the database
+			if ($hest_db) {
+				$hest_db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
+			} elsif ($hest_node) {
+				$hest_node->put_doc($doc);
+			} else {
+				die "not supported";
+			}
+			$added++;
 		}
-	}
 
+		print " $added";
+		$hest_db->sync() if ($index_path);
 
-	print "sync";
-	$db->sync();
-	print " close";
-	$db->close();
+		$offset += EST_CHUNK;
+
+	} while ($results == EST_CHUNK);
+
+	if ($index_path) {
+		print ", close";
+		$hest_db->close();
+	}
 
 	my $dur = (time() - $t) || 1;
 	printf(" [%.2f/s dur: %s]\n",
-		( $results / $dur ),
+		( $added / $dur ),
 		fmt_time($dur)
 	);
-
-	exit;
 }
 
-###################################create tables############################3
+#---- /subs ----
+
+
+## update index ##
+if (($opt{i} || $opt{j} || ($index_path && ! -e $TopDir . $index_path)) && !$opt{c}) {
+	# update all
+	print "force update of HyperEstraier index ";
+	print "importing existing data " unless (-e $TopDir . $index_path);
+	print "by -i flag " if ($opt{i});
+	print "by -j flag " if ($opt{j});
+	print "\n";
+	hest_update();
+}
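+
+# the index is force-updated only when asked for with -i or -j, or when a
+# directly-accessible index path is configured but missing on disk; -c is
+# excluded because a freshly created database has nothing to index yet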
 
+## create tables ##
 if ($opt{c}) {
 	sub do_index {
 		my $index = shift || return;
-		my ($table,$col,$unique) = split(/_/, $index);
+		my ($table,$col,$unique) = split(/:/, $index);
 		$unique ||= '';
-		$index =~ s/,/_/g;
+		$index =~ s/\W+/_/g;
+		print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
 		$dbh->do(qq{ create $unique index $index on $table($col) });
 	}
 
 	print "creating tables...\n";
-
-	$dbh->do(qq{
+
+	$dbh->do( qq{
 		create table hosts (
 			ID	SERIAL		PRIMARY KEY,
 			name	VARCHAR(30)	NOT NULL,
 			IP	VARCHAR(15)
 		);
-	});
-
-	$dbh->do(qq{
+
 		create table shares (
 			ID	SERIAL		PRIMARY KEY,
 			hostID	INTEGER		NOT NULL references hosts(id),
 			name	VARCHAR(30)	NOT NULL,
-			share	VARCHAR(200)	NOT NULL,
-			localpath VARCHAR(200)
+			share	VARCHAR(200)	NOT NULL
 		);
-	});
-
-	$dbh->do(qq{
+
+		create table dvds (
+			ID	SERIAL		PRIMARY KEY,
+			num	INTEGER		NOT NULL,
+			name	VARCHAR(255)	NOT NULL,
+			mjesto	VARCHAR(255)
+		);
+
 		create table backups (
+			id	serial,
 			hostID	INTEGER		NOT NULL references hosts(id),
 			num	INTEGER		NOT NULL,
 			date	integer		NOT NULL,
 			type	CHAR(4)		not null,
 			shareID	integer		not null references shares(id),
-			size	integer		not null,
-			PRIMARY KEY(hostID, num, shareID)
+			size	bigint		not null,
+			inc_size bigint		not null default -1,
+			inc_deleted boolean	default false,
+			parts	integer		not null default 1,
+			PRIMARY KEY(id)
 		);
-	});
 
-	#do_index('backups_hostid,num_unique');
+		create table files (
+			ID	SERIAL,
+			shareID	INTEGER		NOT NULL references shares(id),
+			backupNum INTEGER	NOT NULL,
+			name	VARCHAR(255)	NOT NULL,
+			path	VARCHAR(255)	NOT NULL,
+			date	integer		NOT NULL,
+			type	INTEGER		NOT NULL,
+			size	bigint		NOT NULL,
+			primary key(id)
+		);
 
-	$dbh->do(qq{
-		create table dvds (
-			ID	SERIAL		PRIMARY KEY,
-			num	INTEGER		NOT NULL,
-			name	VARCHAR(255)	NOT NULL,
-			mjesto	VARCHAR(255)
+		create table archive (
+			id	serial,
+			dvd_nr	int		not null,
+			total_size bigint	default -1,
+			note	text,
+			username varchar(20)	not null,
+			date	timestamp	default now(),
+			primary key(id)
+		);
+
+		create table archive_backup (
+			archive_id int		not null references archive(id) on delete cascade,
+			backup_id int		not null references backups(id),
+			primary key(archive_id, backup_id)
 		);
-	});
 
-	$dbh->do(qq{
-		create table files (
-			ID	SERIAL		PRIMARY KEY,
-			shareID	INTEGER		NOT NULL references shares(id),
-			backupNum INTEGER	NOT NULL,
-			name	VARCHAR(255)	NOT NULL,
-			path	VARCHAR(255)	NOT NULL,
-			date	integer		NOT NULL,
-			type	INTEGER		NOT NULL,
-			size	INTEGER		NOT NULL,
-			dvdid	INTEGER		references dvds(id)
+		create table archive_burned (
+			archive_id int		references archive(id),
+			date	timestamp	default now(),
+			part	int		not null default 1,
+			copy	int		not null default 1,
+			iso_size bigint		default -1
+		);
+
+		create table backup_parts (
+			id	serial,
+			backup_id int		references backups(id),
+			part_nr	int		not null check (part_nr > 0),
+			tar_size bigint		not null check (tar_size > 0),
+			size	bigint		not null check (size > 0),
+			md5	text		not null,
+			items	int		not null check (items > 0),
+			date	timestamp	default now(),
+			primary key(id)
 		);
 	});
 
-	print "creating indexes:";
+	print "creating indexes: ";
 
 	foreach my $index (qw(
-		hosts_name
-		backups_hostID
-		backups_num
-		shares_hostID
-		shares_name
-		files_shareID
-		files_path
-		files_name
-		files_date
-		files_size
+		hosts:name
+		backups:hostID
+		backups:num
+		backups:shareID
+		shares:hostID
+		shares:name
+		files:shareID
+		files:path
+		files:name
+		files:date
+		files:size
+		archive:dvd_nr
+		archive_burned:archive_id
+		backup_parts:backup_id,part_nr
 	)) {
-		print " $index";
 		do_index($index);
 	}
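+
+	# each spec above is "table:column[,column][:unique]"; for example
+	# backup_parts:backup_id,part_nr expands to
+	#   create index backup_parts_backup_id_part_nr on backup_parts(backup_id,part_nr)
+	# and a third field of "unique" would yield "create unique index"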
+
+	print " creating sequence: ";
+	foreach my $seq (qw/dvd_nr/) {
+		print "$seq ";
+		$dbh->do( qq{ CREATE SEQUENCE $seq } );
+	}
+
+	print "...\n";
 
 	$dbh->commit;
 
 }
 
+## delete data before inserting ##
 if ($opt{d}) {
 	print "deleting ";
 	foreach my $table (qw(files dvds backups shares hosts)) {
@@ -284,7 +420,7 @@ if ($opt{d}) {
 	$dbh->commit;
 }
 
-#################################INSERT VALUES#############################
+## insert new values ##
 
 # get hosts
 $hosts = $bpc->HostInfoRead();
@@ -309,7 +445,12 @@ WHERE hostID=? AND num=? AND shareid=?
 
 $sth->{insert_backups} = $dbh->prepare(qq{
 INSERT INTO backups (hostID, num, date, type, shareid, size)
-VALUES (?,?,?,?,?,?)
+VALUES (?,?,?,?,?,-1)
+});
+
+$sth->{update_backups_size} = $dbh->prepare(qq{
+UPDATE backups SET size = ?
+WHERE hostID = ? and num = ? and date = ? and type = ? and shareid = ?
 });
 
 $sth->{insert_files} = $dbh->prepare(qq{
@@ -318,7 +459,10 @@ INSERT INTO files
 	VALUES (?,?,?,?,?,?,?)
 });
 
-foreach my $host_key (keys %{$hosts}) {
+my @hosts = keys %{$hosts};
+my $host_nr = 0;
+
+foreach my $host_key (@hosts) {
 
 	my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
 
@@ -333,7 +477,9 @@ foreach my $host_key (keys %{$hosts}) {
 		$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
 	}
 
-	print "host ".$hosts->{$host_key}->{'host'}.": ";
+	$host_nr++;
+	print "host ", $hosts->{$host_key}->{'host'}, " [",
+		$host_nr, "/", ($#hosts + 1), "]: ";
 
 	# get backups for a host
 	my @backups = $bpc->BackupInfoRead($hostname);
@@ -375,15 +521,23 @@ foreach my $host_key (keys %{$hosts}) {
 			# dump some log
 			print curr_time," ", $share;
 
-			my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
-
 			$sth->{insert_backups}->execute(
 				$hostID,
 				$backupNum,
 				$backup->{'endTime'},
-				$backup->{'type'},
+				substr($backup->{'type'},0,4),
 				$shareID,
+			);
+
+			my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+
+			$sth->{update_backups_size}->execute(
+				$size,
+				$hostID,
+				$backupNum,
+				$backup->{'endTime'},
+				substr($backup->{'type'},0,4),
+				$shareID,
 			);
 
 			print " commit";
@@ -396,6 +550,8 @@ foreach my $host_key (keys %{$hosts}) {
 				( ($f+$d) / $dur ),
 				fmt_time($dur)
 			);
+
+			hest_update($hostID, $shareID, $backupNum) if ($nf + $nd > 0);
 		}
 	}
 }
@@ -424,14 +580,14 @@ sub getShareID() {
 
 	$sth->{insert_share} ||= $dbh->prepare(qq{
 		INSERT INTO shares
-			(hostID,name,share,localpath)
-		VALUES (?,?,?,?)
+			(hostID,name,share)
+		VALUES (?,?,?)
 	});
 
 	my $drop_down = $hostname . '/' . $share;
 	$drop_down =~ s#//+#/#g;
 
-	$sth->{insert_share}->execute($hostID,$share, $drop_down ,undef);
+	$sth->{insert_share}->execute($hostID,$share, $drop_down);
 
 	return $dbh->last_insert_id(undef,undef,'shares',undef);
 }
@@ -448,12 +604,12 @@ sub found_in_db {
 		SELECT 1 FROM files
 		WHERE shareID = ? and
 			path = ? and
-			date = ? and
-			size = ?
+			size = ? and
+			( date = ? or date = ? or date = ? )
 		LIMIT 1
 	});
 
-	my @param = ($shareID,$path,$date,$size);
+	my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
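+	# a file recorded on either side of a daylight saving switch can carry
+	# an mtime that differs by exactly one hour, so the query also accepts
+	# date - $dst_offset and date + $dst_offset as the same file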
 
 	$sth->{file_in_db}->execute(@param);
 	my $rows = $sth->{file_in_db}->rows;
 	print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
@@ -503,8 +659,29 @@ sub recurseDir($$$$$$$$) {
 				$filesInBackup->{$path_key}->{'size'}
 			));
 
+			my $key_dst_prev = join(" ", (
+				$shareID,
+				$dir,
+				$path_key,
+				$filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
+				$filesInBackup->{$path_key}->{'size'}
+			));
+
+			my $key_dst_next = join(" ", (
+				$shareID,
+				$dir,
+				$path_key,
+				$filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
+				$filesInBackup->{$path_key}->{'size'}
+			));
+
 			my $found;
-			if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
+			if (
+				! defined($beenThere->{$key}) &&
+				! defined($beenThere->{$key_dst_prev}) &&
+				! defined($beenThere->{$key_dst_next}) &&
+				! ($found = found_in_db($key, @data))
+			) {
 				print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);
 
 				if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
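+					# entries of type BPC_FTYPE_DIR (directories) are
+					# handled in this branch so recurseDir can descend
+					# into them; other entries go to the files table
+					# through the insert_files statement prepared above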