use BackupPC::Lib;
use BackupPC::View;
use Data::Dumper;
-use Getopt::Std;
+use Getopt::Long::Descriptive;
use Time::HiRes qw/time/;
use File::Pid;
use POSIX qw/strftime/;
my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
-my %opt;
+my ($opt,$usage) = describe_options(
+"%c %o",
+[ 'create|c', "create database on first use" ],
+[ 'delete|d', "delete database before import" ],
+[ 'max|m=i', "import just max increments for one host" ],
+[ 'host|h=s@', "import just host(s)" ],
+[],
+[ 'verbose|v:i', 'set verbosity (debug) level' ],
+[ 'index|i', 'update full text index' ],
+[ 'junk|j', "update full text, don't check existing files" ],
+[ 'fast|f', "don't do anything with full text index" ],
+[ 'quiet|q', "be quiet for hosts without changes" ],
+[ 'help', "show help" ],
+);
+
+print($usage->text), exit if $opt->help;
+
+warn "hosts: ",dump( $opt->host );
-if ( !getopts("cdm:v:ijfq", \%opt ) ) {
- print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f]
-
-Options:
- -c create database on first use
- -d delete database before import
- -m num import just num increments for one host
- -v num set verbosity (debug) level (default $debug)
- -i update Hyper Estraier full text index
- -j update full text, don't check existing files
- -f don't do anything with full text index
- -q be quiet for hosts without changes
-
-Option -j is variation on -i. It will allow faster initial creation
-of full-text index from existing database.
-
-Option -f will create database which is out of sync with full text index. You
-will have to re-run $0 with -i to fix it.
-
-EOF
- exit 1;
-}
+#---- subs ----
-if ($opt{v}) {
- print "Debug level at $opt{v}\n";
- $debug = $opt{v};
-} elsif ($opt{f}) {
- print "WARNING: disabling full-text index update. You need to re-run $0 -j !\n";
- $index_node_url = undef;
+sub status {
+ my $text = shift;
+ $text =~ s{\s+$}{};
+ my $new = $0;
+ $new =~ s{^[\w\/]+/(\w+) }{$1 }; # strip path from process name
+ if ( $text =~ m/^\|/ ) {
+ $new =~ s/\|.*/$text/ or $new .= " $text";
+ } else {
+ $new =~ s/\s+.*/ $text/ or $new .= " $text";
+ }
+ $0 = $new;
}
-#---- subs ----
-
sub fmt_time {
my $t = shift || return;
my $out = "";
my ($host_id, $share_id, $num) = @_;
- my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initital import\n";
+	my $skip_check = $opt->junk && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";
print curr_time," updating fulltext:";
}
print "$added";
+ status "| $added";
$offset += EST_CHUNK;
## update index ##
-if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
+if ( ( $opt->index || $opt->junk ) && !$opt->create ) {
# update all
print "force update of Hyper Estraier index ";
- print "by -i flag" if ($opt{i});
- print "by -j flag" if ($opt{j});
+ print "by -i flag" if ($opt->index);
+ print "by -j flag" if ($opt->junk);
print "\n";
hest_update();
}
## create tables ##
-if ($opt{c}) {
+if ($opt->create) {
sub do_index {
my $index = shift || return;
my ($table,$col,$unique) = split(/:/, $index);
print "creating tables...\n";
- $dbh->do( qq{
+ foreach my $sql ( split(/;/, qq{
create table hosts (
ID SERIAL PRIMARY KEY,
name VARCHAR(30) NOT NULL,
PRIMARY KEY(id)
);
+ create table backup_parts (
+ id serial,
+ backup_id int references backups(id),
+ part_nr int not null check (part_nr > 0),
+ tar_size bigint not null check (tar_size > 0),
+ size bigint not null check (size > 0),
+ md5 text not null,
+ items int not null check (items > 0),
+ date timestamp default now(),
+ filename text not null,
+ primary key(id)
+ );
+
create table files (
ID SERIAL,
shareID INTEGER NOT NULL references shares(id),
primary key(id)
);
- create table archive_backup (
+ create table archive_parts (
archive_id int not null references archive(id) on delete cascade,
- backup_id int not null references backups(id),
- primary key(archive_id, backup_id)
+ backup_id int not null references backups(id),
+ backup_part_id int not null references backup_parts(id),
+ primary key(archive_id, backup_id, backup_part_id)
);
create table archive_burned (
iso_size bigint default -1
);
- create table backup_parts (
- id serial,
- backup_id int references backups(id),
- part_nr int not null check (part_nr > 0),
- tar_size bigint not null check (tar_size > 0),
- size bigint not null check (size > 0),
- md5 text not null,
- items int not null check (items > 0),
- date timestamp default now(),
- primary key(id)
- );
-
-- report backups and corresponding dvd
-
- create view backups_on_dvds as
- select
- backups.id as id,
- hosts.name || ':' || shares.name as share,
- backups.num as num,
- backups.type as type,
- abstime(backups.date) as backup_date,
- backups.size as size,
- backups.inc_size as gzip_size,
- archive.id as archive_id,
- archive.dvd_nr
- from backups
- join shares on backups.shareid=shares.id
- join hosts on shares.hostid = hosts.id
- left outer join archive_backup on backups.id = archive_backup.backup_id
- left outer join archive on archive_backup.archive_id = archive.id
- where backups.parts > 0 and size > 0
- order by backups.date
- ;
- });
+--
+-- create view backups_on_dvds as
+-- select
+-- backups.id as id,
+-- hosts.name || ':' || shares.name as share,
+-- backups.num as num,
+-- backups.type as type,
+-- abstime(backups.date) as backup_date,
+-- backups.size as size,
+-- backups.inc_size as gzip_size,
+-- archive.id as archive_id,
+-- archive.dvd_nr
+-- from backups
+-- join shares on backups.shareid=shares.id
+-- join hosts on shares.hostid = hosts.id
+-- left outer join archive_backup on backups.id = archive_backup.backup_id
+-- left outer join archive on archive_backup.archive_id = archive.id
+-- where backups.parts > 0 and size > 0
+-- order by backups.date
+-- ;
+ })) {
+ warn "SQL: $sql\n";
+ $dbh->do( $sql );
+ }
print "creating indexes: ";
}
## delete data before inseting ##
-if ($opt{d}) {
+if ($opt->delete) {
print "deleting ";
foreach my $table (qw(files dvds backups shares hosts)) {
print "$table ";
});
$sth->{hosts_by_name} = $dbh->prepare(qq{
-SELECT ID FROM hosts WHERE name=?
+SELECT id FROM hosts WHERE name=?
});
$sth->{backups_count} = $dbh->prepare(qq{
my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
- $sth->{hosts_by_name}->execute($hosts->{$host_key}->{'host'});
+ next if $opt->host && ! grep { m/^$hostname$/ } @{ $opt->host };
+
+ $sth->{hosts_by_name}->execute($hostname);
unless (($hostID) = $sth->{hosts_by_name}->fetchrow_array()) {
$sth->{insert_hosts}->execute(
($#hosts + 1),
$incs
);
- print $host_header unless ($opt{q});
+ print $host_header unless $opt->quiet;
my $inc_nr = 0;
$beenThere = {};
foreach my $backup (@backups) {
$inc_nr++;
- last if (defined $opt{m} && $inc_nr > $opt{m});
+ last if defined $opt->max && $inc_nr > $opt->max;
my $backupNum = $backup->{'num'};
my @backupShares = ();
strftime($t_fmt,localtime($backup->{startTime})),
fmt_time($backup->{endTime} - $backup->{startTime})
);
- print $share_header unless ($opt{q});
+ print $share_header unless $opt->quiet;
+ status "$hostname $backupNum $share_header";
- my $files = BackupPC::View->new($bpc, $hostname, \@backups, { only_first => 1 });
+ my $files = BackupPC::View->new($bpc, $hostname, \@backups, { only_increment => 1 });
foreach my $share ($files->shareList($backupNum)) {
next if ($count > 0);
# dump host and share header for -q
- if ($opt{q}) {
+ if ( $opt->quiet ) {
if ($host_header) {
print $host_header;
$host_header = undef;
}
my $dur = (time() - $t) || 1;
- printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
+ my $status = sprintf("%d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]",
$nf, $f, $nd, $d,
($size / 1024 / 1024),
( ($f+$d) / $dur ),
fmt_time($dur)
);
+ print " $status\n";
+ status "$hostname $backupNum $status";
if ($nf + $nd > 0) {
- eval { hest_update($hostID, $shareID, $backupNum) };
- warn "ERROR: $@" if $@;
+ status "$hostname $backupNum full-text | indexing";
+ #eval { hest_update($hostID, $shareID, $backupNum) };
+ #warn "ERROR: $@" if $@;
+ hest_update($hostID, $shareID, $backupNum);
+ # eval breaks our re-try logic
}
}