diff --git a/bin/BackupPC_ASA_SearchUpdate b/bin/BackupPC_ASA_SearchUpdate
index 5cff61f..b921594 100755
--- a/bin/BackupPC_ASA_SearchUpdate
+++ b/bin/BackupPC_ASA_SearchUpdate
@@ -7,7 +7,7 @@ use DBI;
 use BackupPC::Lib;
 use BackupPC::View;
 use Data::Dumper;
-use Getopt::Std;
+use Getopt::Long::Descriptive;
 use Time::HiRes qw/time/;
 use File::Pid;
 use POSIX qw/strftime/;
@@ -57,42 +57,40 @@ my $index_node_url = $Conf{HyperEstraierIndex};
 
 my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
 
-my %opt;
+my ($opt,$usage) = describe_options(
+"%c %o",
+[ 'create|c', "create database on first use" ],
+[ 'delete|d', "delete database before import" ],
+[ 'max|m=i', "import just max increments for one host" ],
+[ 'host|h=s@', "import just host(s)" ],
+[],
+[ 'verbose|v:i', 'set verbosity (debug) level' ],
+[ 'index|i', 'update full text index' ],
+[ 'junk|j', "update full text, don't check existing files" ],
+[ 'fast|f', "don't do anything with full text index" ],
+[ 'quiet|q', "be quiet for hosts without changes" ],
+[ 'help', "show help" ],
+);
+
+print($usage->text), exit if $opt->help;
+
+warn "hosts: ", Dumper( $opt->host );
 
-if ( !getopts("cdm:v:ijfq", \%opt ) ) {
-	print STDERR <<EOF;
-	[usage text lost in page extraction]
+$opt->junk && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";
 
 print curr_time," updating fulltext:";
@@ -185,6 +183,7 @@ sub hest_update {
 	}
 
 	print "$added";
+	status "| $added";
 
 	$offset += EST_CHUNK;
 
@@ -203,17 +202,17 @@ sub hest_update {
 
 ## update index ##
 
-if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
+if ( ( $opt->index || $opt->junk ) && !$opt->create ) {
 	# update all
 	print "force update of Hyper Estraier index ";
-	print "by -i flag" if ($opt{i});
-	print "by -j flag" if ($opt{j});
+	print "by -i flag" if ($opt->index);
+	print "by -j flag" if ($opt->junk);
 	print "\n";
 	hest_update();
 }
 
 ## create tables ##
-if ($opt{c}) {
+if ($opt->create) {
 	sub do_index {
 		my $index = shift || return;
 		my ($table,$col,$unique) = split(/:/, $index);
@@ -225,7 +224,7 @@ if ($opt{c}) {
 
 	print "creating tables...\n";
 
-	$dbh->do( qq{
+	foreach my $sql ( split(/;/, qq{
 		create table hosts (
 			ID	SERIAL		PRIMARY KEY,
 			name	VARCHAR(30)	NOT NULL,
@@ -260,6 +259,19 @@
 			PRIMARY KEY(id)
 		);
 
+		create table backup_parts (
+			id serial,
+			backup_id int references backups(id),
+			part_nr int not null check (part_nr > 0),
+			tar_size bigint not null check (tar_size > 0),
+			size bigint not null check (size > 0),
+			md5 text not null,
+			items int not null check (items > 0),
+			date timestamp default now(),
+			filename text not null,
+			primary key(id)
+		);
+
 		create table files (
 			ID	SERIAL,
 			shareID	INTEGER	NOT NULL references shares(id),
@@ -282,10 +294,11 @@
 			primary key(id)
 		);
 
-		create table archive_backup (
+		create table archive_parts (
 			archive_id int not null references archive(id) on delete cascade,
-			backup_id int not null references backups(id),
-			primary key(archive_id, backup_id)
+			backup_id int not null references backups(id),
+			backup_part_id int not null references backup_parts(id),
+			primary key(archive_id, backup_id, backup_part_id)
 		);
 
 		create table archive_burned (
@@ -296,40 +309,31 @@
 			iso_size bigint default -1
 		);
 
-		create table backup_parts (
-			id serial,
-			backup_id int references backups(id),
-			part_nr int not null check (part_nr > 0),
-			tar_size bigint not null check (tar_size > 0),
-			size bigint not null check (size > 0),
-			md5 text not null,
-			items int not null check (items > 0),
-			date timestamp default now(),
-			primary key(id)
-		);
-
 		-- report backups and corresponding dvd
-
-		create view backups_on_dvds as
-		select
-			backups.id as id,
-			hosts.name || ':' || shares.name as share,
-			backups.num as num,
-			backups.type as type,
-			abstime(backups.date) as backup_date,
-			backups.size as size,
-			backups.inc_size as gzip_size,
-			archive.id as archive_id,
-			archive.dvd_nr
-		from backups
-		join shares on backups.shareid=shares.id
-		join hosts on shares.hostid = hosts.id
-		left outer join archive_backup on backups.id = archive_backup.backup_id
-		left outer join archive on archive_backup.archive_id = archive.id
-		where backups.parts > 0 and size > 0
-		order by backups.date
-		;
-	});
+--
+-- create view backups_on_dvds as
+-- select
+--	backups.id as id,
+--	hosts.name || ':' || shares.name as share,
+--	backups.num as num,
+--	backups.type as type,
+--	abstime(backups.date) as backup_date,
+--	backups.size as size,
+--	backups.inc_size as gzip_size,
+--	archive.id as archive_id,
+--	archive.dvd_nr
+-- from backups
+-- join shares on backups.shareid=shares.id
+-- join hosts on shares.hostid = hosts.id
+-- left outer join archive_backup on backups.id = archive_backup.backup_id
+-- left outer join archive on archive_backup.archive_id = archive.id
+-- where backups.parts > 0 and size > 0
+-- order by backups.date
+-- ;
+	})) {
+		warn "SQL: $sql\n";
+		$dbh->do( $sql );
+	}
 
 	print "creating indexes: ";
 
@@ -426,7 +430,7 @@ __END_OF_TRIGGER__
 }
 
 ## delete data before inserting ##
-if ($opt{d}) {
+if ($opt->delete) {
 	print "deleting ";
 	foreach my $table (qw(files dvds backups shares hosts)) {
 		print "$table ";
@@ -451,7 +455,7 @@
 INSERT INTO hosts (name, IP) VALUES (?,?)
 });
 
 $sth->{hosts_by_name} = $dbh->prepare(qq{
-SELECT ID FROM hosts WHERE name=?
+SELECT id FROM hosts WHERE name=?
 });
 
 $sth->{backups_count} = $dbh->prepare(qq{
@@ -483,7 +487,9 @@ foreach my $host_key (@hosts) {
 
 	my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
 
-	$sth->{hosts_by_name}->execute($hosts->{$host_key}->{'host'});
+	next if $opt->host && ! grep { m/^$hostname$/ } @{ $opt->host };
+
+	$sth->{hosts_by_name}->execute($hostname);
 
 	unless (($hostID) = $sth->{hosts_by_name}->fetchrow_array()) {
 		$sth->{insert_hosts}->execute(
@@ -505,7 +511,7 @@ foreach my $host_key (@hosts) {
 		($#hosts + 1),
 		$incs
 	);
-	print $host_header unless ($opt{q});
+	print $host_header unless $opt->quiet;
 
 	my $inc_nr = 0;
 	$beenThere = {};
@@ -513,7 +519,7 @@ foreach my $host_key (@hosts) {
 	foreach my $backup (@backups) {
 
 		$inc_nr++;
-		last if (defined $opt{m} && $inc_nr > $opt{m});
+		last if defined $opt->max && $inc_nr > $opt->max;
 
 		my $backupNum = $backup->{'num'};
 		my @backupShares = ();
@@ -526,9 +532,10 @@ foreach my $host_key (@hosts) {
 			strftime($t_fmt,localtime($backup->{startTime})),
 			fmt_time($backup->{endTime} - $backup->{startTime})
 		);
-		print $share_header unless ($opt{q});
+		print $share_header unless $opt->quiet;
+		status "$hostname $backupNum $share_header";
 
-		my $files = BackupPC::View->new($bpc, $hostname, \@backups, { only_first => 1 });
+		my $files = BackupPC::View->new($bpc, $hostname, \@backups, { only_increment => 1 });
 
 		foreach my $share ($files->shareList($backupNum)) {
 
@@ -542,7 +549,7 @@ foreach my $host_key (@hosts) {
 			next if ($count > 0);
 
 			# dump host and share header for -q
-			if ($opt{q}) {
+			if ( $opt->quiet ) {
 				if ($host_header) {
 					print $host_header;
 					$host_header = undef;
@@ -581,16 +588,21 @@ foreach my $host_key (@hosts) {
 			}
 
 			my $dur = (time() - $t) || 1;
-			printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
+			my $status = sprintf("%d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]",
 				$nf, $f, $nd, $d,
 				($size / 1024 / 1024),
 				( ($f+$d) / $dur ),
 				fmt_time($dur)
 			);
+			print " $status\n";
+			status "$hostname $backupNum $status";
 
 			if ($nf + $nd > 0) {
-				eval { hest_update($hostID, $shareID, $backupNum) };
-				warn "ERROR: $@" if $@;
+				status "$hostname $backupNum full-text | indexing";
+				#eval { hest_update($hostID, $shareID, $backupNum) };
+				#warn "ERROR: $@" if $@;
+				hest_update($hostID, $shareID, $backupNum);
+				# eval breaks our re-try logic
 			}
 		}
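
The headline change in this patch replaces Getopt::Std with
Getopt::Long::Descriptive. For readers unfamiliar with that module, here is a
minimal, self-contained sketch of the pattern the patch adopts; the option
names and help strings are copied from the diff, but the surrounding script is
illustrative only, not part of the patch:

    #!/usr/bin/perl
    use strict;
    use warnings;
    use Getopt::Long::Descriptive;

    # describe_options() returns an option object plus a usage object;
    # each spec creates an accessor named after its first (long) name,
    # which is why the patch rewrites $opt{c} as $opt->create, etc.
    my ($opt, $usage) = describe_options(
        "%c %o",
        [ 'create|c',  "create database on first use" ],
        [ 'max|m=i',   "import just max increments for one host" ],
        [ 'host|h=s@', "import just host(s)" ],    # repeatable option
        [ 'quiet|q',   "be quiet for hosts without changes" ],
        [ 'help',      "show help" ],
    );

    print($usage->text), exit if $opt->help;

    # '=s@' options come back as an array reference (undef if unused),
    # matching the new "next if $opt->host && ! grep ..." host filter.
    print "hosts: ", join(',', @{ $opt->host }), "\n" if $opt->host;
    print "max: ", $opt->max, "\n" if defined $opt->max;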
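The table-creation hunk turns one big $dbh->do() into a loop over
split(/;/, ...), so each DDL statement runs, and fails, individually. That is
safe here only because none of these CREATE TABLE statements contains an
embedded ';'; the PL/pgSQL triggers installed later in the script (the
__END_OF_TRIGGER__ here-doc visible in the hunk context) will contain
semicolons in their bodies and must still be executed whole. A hedged sketch
of the loop's shape, assuming $dbh is the connected handle from the script and
$ddl is a string of semicolon-separated statements:

    # Execute each statement separately so an error names the exact
    # statement; skip the empty fragment after the trailing ';'.
    foreach my $sql ( grep { /\S/ } split(/;/, $ddl) ) {
        warn "SQL: $sql\n";
        $dbh->do($sql);    # RaiseError is on, so a bad statement dies here
    }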
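On the schema side, the patch replaces archive_backup, which linked an archive
to whole backups, with archive_parts, which links an archive to individual
rows of the relocated backup_parts table (one row per tar part, with size, md5
and filename). A hypothetical query against the new tables; the column names
come from the CREATE TABLE statements above, while the query itself and
$archive_id are illustrative only:

    # List the parts that make up one archive, ordered by backup and
    # part number, with enough metadata to verify each tar part.
    my $parts = $dbh->selectall_arrayref(qq{
        select bp.backup_id, bp.part_nr, bp.filename, bp.size, bp.md5
          from archive_parts ap
          join backup_parts bp on bp.id = ap.backup_part_id
         where ap.archive_id = ?
         order by bp.backup_id, bp.part_nr
    }, { Slice => {} }, $archive_id);

    printf "%d/%d %s %d %s\n",
        @{$_}{qw(backup_id part_nr filename size md5)} for @$parts;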