X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=bin%2FBackupPC_updatedb;h=b052b334fa8c9bbf0638160e00472b71435d05fa;hb=9cb8eb2c382eac305e8210d450694b337b5ca1e2;hp=125b6f50fd42f4c347c7fa90ed19f481733f1565;hpb=10ec8f9f61081ce133ea03e38bbf23ede247a676;p=BackupPC.git

diff --git a/bin/BackupPC_updatedb b/bin/BackupPC_updatedb
index 125b6f5..b052b33 100755
--- a/bin/BackupPC_updatedb
+++ b/bin/BackupPC_updatedb
@@ -12,16 +12,25 @@ use Time::HiRes qw/time/;
 use File::Pid;
 use POSIX qw/strftime/;
 use BackupPC::SearchLib;
+use Cwd qw/abs_path/;
 
 use constant BPC_FTYPE_DIR => 5;
 use constant EST_CHUNK => 100000;
 
+# one hour offset used to absorb daylight saving time changes
+my $dst_offset = 60 * 60;
+
 my $debug = 0;
 $|=1;
 
 my $start_t = time();
 
-my $pidfile = new File::Pid;
+my $pid_path = abs_path($0);
+$pid_path =~ s/\W+/_/g;
+
+my $pidfile = new File::Pid({
+	file => "/tmp/$pid_path",
+});
 
 if (my $pid = $pidfile->running ) {
 	die "$0 already running: $pid\n";
@@ -29,8 +38,8 @@ if (my $pid = $pidfile->running ) {
 	$pidfile->remove;
 	$pidfile = new File::Pid;
 }
-$pidfile->write;
 print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
+$pidfile->write;
 
 my $t_fmt = '%Y-%m-%d %H:%M:%S';
 
@@ -43,16 +52,15 @@ my $beenThere = {};
 my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
 my $user = $Conf{SearchUser} || '';
 
-my $use_hest = $Conf{HyperEstraierIndex};
-my ($index_path, $index_node_url) = BackupPC::SearchLib::getHyperEstraier_url($use_hest);
+my $index_node_url = $Conf{HyperEstraierIndex};
 
 my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
 
 my %opt;
 
-if ( !getopts("cdm:v:ij", \%opt ) ) {
+if ( !getopts("cdm:v:ijf", \%opt ) ) {
 	print STDERR <<EOF;
[... the usage text and several following hunks were lost when this page was captured; the diff resumes inside sub hest_update ...]
-		$hest_db ||= HyperEstraier::Database->new();
-		$hest_db->open($TopDir . $index_path, $HyperEstraier::Database::DBWRITER | $HyperEstraier::Database::DBCREAT);
-		print " directly";
-	} elsif ($index_node_url) {
-		$hest_node ||= HyperEstraier::Node->new($index_node_url);
+	print " opening index $index_node_url";
+	if ($index_node_url) {
+		$hest_node ||= Search::Estraier::Node->new($index_node_url);
 		$hest_node->set_auth('admin', 'admin');
 		print " via node URL";
 	} else {
-		die "don't know how to use HyperEstraier Index $use_hest";
+		die "don't know how to use Hyper Estraier Index $index_node_url";
 	}
-	print " increment is " . EST_CHUNK . " files:";
 
 	my $results = 0;
 
@@ -143,7 +154,7 @@ sub hest_update {
 	my $where = '';
 	my @data;
 
-	if ($host_id && $share_id && $num) {
+	if (defined($host_id) && defined($share_id) && defined($num)) {
 		$where = qq{
 		WHERE
 			hosts.id = ? AND
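A note on the defined() change in the hunk above: in Perl, 0 is false, so the old guard `if ($host_id && $share_id && $num)` silently dropped the WHERE clause whenever a legitimate zero was passed (backup number 0, for instance), forcing a full-table pass; defined() rejects only truly missing arguments. A minimal sketch, not part of the patch, illustrating the difference; the column names shares.id and files.backupnum are illustrative assumptions, not taken from the diff:

    # Sketch only: truthiness vs. defined() for optional numeric filters.
    use strict;
    use warnings;

    sub where_clause {
        my ($host_id, $share_id, $num) = @_;
        # defined() accepts 0; the old "&&" truthiness test did not
        return '' unless defined($host_id) && defined($share_id) && defined($num);
        return 'WHERE hosts.id = ? AND shares.id = ? AND files.backupnum = ?';
    }

    print where_clause(1, 2, 0)     ? "filtered\n" : "full scan\n"; # filtered: 0 is a real backup number
    print where_clause(1, 2, undef) ? "filtered\n" : "full scan\n"; # full scan: argument truly missing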
@@ -183,6 +194,8 @@ sub hest_update {
 
 		if ($results == 0) {
 			print " - no new files\n";
 			last;
+		} else {
+			print " - $results files: ";
 		}
 
 		sub fmt_date {
@@ -203,13 +216,14 @@ sub hest_update {
 			}
 
 			# create a document object
-			my $doc = HyperEstraier::Document->new;
+			my $doc = Search::Estraier::Document->new;
 
 			# add attributes to the document object
 			$doc->add_attr('@uri', $uri);
 
 			foreach my $c (@{ $sth->{NAME} }) {
-				$doc->add_attr($c, $row->{$c}) if ($row->{$c});
+				print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
+				$doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
 			}
 
 			#$doc->add_attr('@cdate', fmt_date($row->{'date'}));
@@ -223,9 +237,7 @@ sub hest_update {
 			print STDERR $doc->dump_draft,"\n" if ($debug > 1);
 
 			# register the document object to the database
-			if ($hest_db) {
-				$hest_db->put_doc($doc, $HyperEstraier::Database::PDCLEAN);
-			} elsif ($hest_node) {
+			if ($hest_node) {
 				$hest_node->put_doc($doc);
 			} else {
 				die "not supported";
@@ -234,17 +246,11 @@ sub hest_update {
 		}
 
 		print " $added";
-		$hest_db->sync() if ($index_path);
 
 		$offset += EST_CHUNK;
 
 	} while ($results == EST_CHUNK);
 
-	if ($index_path) {
-		print ", close";
-		$hest_db->close();
-	}
-
 	my $dur = (time() - $t) || 1;
 	printf(" [%.2f/s dur: %s]\n",
 		( $added / $dur ),
@@ -256,10 +262,9 @@ sub hest_update {
 
 ## update index ##
 
-if (($opt{i} || $opt{j} || ($index_path && ! -e $index_path)) && !$opt{c}) {
+if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
 	# update all
-	print "force update of HyperEstraier index ";
-	print "importing existing data" unless (-e $index_path);
+	print "force update of Hyper Estraier index ";
 	print "by -i flag" if ($opt{i});
 	print "by -j flag" if ($opt{j});
 	print "\n";
@@ -278,34 +283,28 @@ if ($opt{c}) {
 	}
 
 	print "creating tables...\n";
-
-	$dbh->do(qq{
+
+	$dbh->do( qq{
 		create table hosts (
 			ID	SERIAL		PRIMARY KEY,
 			name	VARCHAR(30)	NOT NULL,
 			IP	VARCHAR(15)
 		);
-	});
-
-	$dbh->do(qq{
+
 		create table shares (
 			ID	SERIAL		PRIMARY KEY,
 			hostID	INTEGER		NOT NULL references hosts(id),
 			name	VARCHAR(30)	NOT NULL,
 			share	VARCHAR(200)	NOT NULL
 		);
-	});
 
-	$dbh->do(qq{
 		create table dvds (
 			ID	SERIAL		PRIMARY KEY,
 			num	INTEGER		NOT NULL,
 			name	VARCHAR(255)	NOT NULL,
 			mjesto	VARCHAR(255)
 		);
-	});
-
-	$dbh->do(qq{
+
 		create table backups (
 			id	serial,
 			hostID	INTEGER	NOT NULL references hosts(id),
@@ -316,11 +315,10 @@ if ($opt{c}) {
 			size	bigint	not null,
 			inc_size	bigint	not null default -1,
 			inc_deleted	boolean	default false,
+			parts	integer	not null default 1,
 			PRIMARY KEY(id)
 		);
-	});
 
-	$dbh->do(qq{
 		create table files (
 			ID	SERIAL,
 			shareID	INTEGER	NOT NULL references shares(id),
@@ -332,10 +330,7 @@ if ($opt{c}) {
 			size	bigint	NOT NULL,
 			primary key(id)
 		);
-	});
-
-	$dbh->do( qq{
 		create table archive (
 			id	serial,
 			dvd_nr	int not null,
@@ -345,16 +340,32 @@ if ($opt{c}) {
 			date	timestamp default now(),
 			primary key(id)
 		);
-	}
-	);
 
-	$dbh->do( qq{
-	create table archive_backup
-	(
+	create table archive_backup (
 		archive_id	int not null references archive(id) on delete cascade,
 		backup_id	int not null references backups(id),
 		primary key(archive_id, backup_id)
 	);
+
+	create table archive_burned (
+		archive_id	int references archive(id),
+		date		timestamp default now(),
+		part		int not null default 1,
+		copy		int not null default 1,
+		iso_size	bigint default -1
+	);
+
+	create table backup_parts (
+		id serial,
+		backup_id int references backups(id),
+		part_nr int not null check (part_nr > 0),
+		tar_size bigint not null check (tar_size > 0),
+		size bigint not null check (size > 0),
+		md5 text not null,
+		items int not null check (items > 0),
+		date timestamp default now(),
+		primary key(id)
+	);
 	});
 
 	print "creating indexes: ";
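The schema hunk above folds what used to be one $dbh->do() call per table into a single $dbh->do( qq{ ... }) batch, and the new backup_parts table pushes validation into the database with CHECK constraints. A hedged sketch, not part of the patch, of how a bad part row would be rejected under that schema; the DSN and credentials below are placeholders for illustration (the real values come from $Conf{SearchDSN} and $Conf{SearchUser} in config.pl), and it assumes a PostgreSQL database that already contains backup_parts:

    # Sketch only: a CHECK-constraint violation dies under RaiseError,
    # and the eval/rollback pattern (used later in this patch for
    # update_backups_size) undoes the transaction.
    use strict;
    use warnings;
    use DBI;

    my $dbh = DBI->connect('dbi:Pg:dbname=backuppc', 'backuppc', '',
        { RaiseError => 1, AutoCommit => 0 });

    my $insert = $dbh->prepare(qq{
        INSERT INTO backup_parts (backup_id, part_nr, tar_size, size, md5, items)
        VALUES (?,?,?,?,?,?)
    });

    eval {
        # part_nr = 0 violates "check (part_nr > 0)", so execute() dies
        $insert->execute(1, 0, 1024, 1024, 'd41d8cd98f00b204e9800998ecf8427e', 10);
        $dbh->commit;
    };
    if ($@) {
        warn "rejected by CHECK constraint: $@";
        $dbh->rollback;
    }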
@@ -372,6 +383,8 @@ if ($opt{c}) {
 		files:date
 		files:size
 		archive:dvd_nr
+		archive_burned:archive_id
+		backup_parts:backup_id,part_nr
 	)) {
 		do_index($index);
 	}
@@ -426,7 +439,12 @@ WHERE hostID=? AND num=? AND shareid=?
 
 $sth->{insert_backups} = $dbh->prepare(qq{
 INSERT INTO backups (hostID, num, date, type, shareid, size)
-VALUES (?,?,?,?,?,?)
+VALUES (?,?,?,?,?,-1)
+});
+
+$sth->{update_backups_size} = $dbh->prepare(qq{
+UPDATE backups SET size = ?
+WHERE hostID = ? and num = ? and date = ? and type = ? and shareid = ?
 });
 
 $sth->{insert_files} = $dbh->prepare(qq{
@@ -435,7 +453,10 @@ INSERT INTO files
 	VALUES (?,?,?,?,?,?,?)
 });
 
-foreach my $host_key (keys %{$hosts}) {
+my @hosts = keys %{$hosts};
+my $host_nr = 0;
+
+foreach my $host_key (@hosts) {
 
 	my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
 
@@ -450,7 +471,9 @@ foreach my $host_key (keys %{$hosts}) {
 		$hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
 	}
 
-	print "host ".$hosts->{$host_key}->{'host'}.": ";
+	$host_nr++;
+	print "host ", $hosts->{$host_key}->{'host'}, " [",
+		$host_nr, "/", ($#hosts + 1), "]: ";
 
 	# get backups for a host
 	my @backups = $bpc->BackupInfoRead($hostname);
@@ -492,19 +515,32 @@ foreach my $host_key (keys %{$hosts}) {
 		# dump some log
 		print curr_time," ", $share;
-		my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
 
 		$sth->{insert_backups}->execute(
 			$hostID,
 			$backupNum,
 			$backup->{'endTime'},
 			substr($backup->{'type'},0,4),
 			$shareID,
-			$size,
 		);
 
-		print " commit";
-		$dbh->commit();
+		my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+
+		eval {
+			$sth->{update_backups_size}->execute(
+				$size,
+				$hostID,
+				$backupNum,
+				$backup->{'endTime'},
+				substr($backup->{'type'},0,4),
+				$shareID,
+			);
+			print " commit";
+			$dbh->commit();
+		};
+		if ($@) {
+			print " rollback";
+			$dbh->rollback();
+		}
 
 		my $dur = (time() - $t) || 1;
 		printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
@@ -567,12 +603,12 @@ sub found_in_db {
 		SELECT 1 FROM files
 		WHERE shareID = ? and
 			path = ? and
-			date = ? and
-			size = ?
+			size = ? and
+			( date = ? or date = ? or date = ? )
 		LIMIT 1
 	});
 
-	my @param = ($shareID,$path,$date,$size);
+	my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
 	$sth->{file_in_db}->execute(@param);
 	my $rows = $sth->{file_in_db}->rows;
 	print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
@@ -622,8 +658,29 @@ sub recurseDir($$$$$$$$) {
 				$filesInBackup->{$path_key}->{'size'}
 			));
 
+			my $key_dst_prev = join(" ", (
+				$shareID,
+				$dir,
+				$path_key,
+				$filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
+				$filesInBackup->{$path_key}->{'size'}
+			));
+
+			my $key_dst_next = join(" ", (
+				$shareID,
+				$dir,
+				$path_key,
+				$filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
+				$filesInBackup->{$path_key}->{'size'}
+			));
+
 			my $found;
-			if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
+			if (
+				! defined($beenThere->{$key}) &&
+				! defined($beenThere->{$key_dst_prev}) &&
+				! defined($beenThere->{$key_dst_next}) &&
+				! ($found = found_in_db($key, @data))
+			) {
 				print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);
 
 				if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
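The $dst_offset machinery above (the key_dst_prev/key_dst_next cache keys and the three-way date match in found_in_db) treats mtimes exactly one hour apart as the same file, because a daylight saving time transition between two dumps shifts localtime-derived timestamps by 3600 seconds and would otherwise make every unchanged file look new. A standalone sketch of the matching rule, not part of the patch:

    # Sketch only: the same +/- one hour tolerance that found_in_db
    # expresses in SQL as ( date = ? or date = ? or date = ? ).
    use strict;
    use warnings;

    my $dst_offset = 60 * 60;

    sub mtime_matches {
        my ($db_mtime, $new_mtime) = @_;
        # true if the new mtime equals the stored one, or differs by
        # exactly one hour in either direction
        return grep { $new_mtime == $_ }
            ($db_mtime, $db_mtime - $dst_offset, $db_mtime + $dst_offset);
    }

    print mtime_matches(1_100_000_000, 1_100_000_000) ? "dup\n" : "new\n"; # dup: exact match
    print mtime_matches(1_100_000_000, 1_100_003_600) ? "dup\n" : "new\n"; # dup: shifted by DST
    print mtime_matches(1_100_000_000, 1_100_007_200) ? "dup\n" : "new\n"; # new: two hours off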