r11658@llin: dpavlin | 2005-12-13 02:10:39 +0100
[BackupPC.git] / bin / BackupPC_updatedb
index 27b2e54..1b254b2 100755 (executable)
@@ -12,16 +12,25 @@ use Time::HiRes qw/time/;
 use File::Pid;
 use POSIX qw/strftime/;
 use BackupPC::SearchLib;
+use Cwd qw/abs_path/;
 
 use constant BPC_FTYPE_DIR => 5;
 use constant EST_CHUNK => 100000;
 
+# one-hour offset used to tolerate daylight saving time changes
+my $dst_offset = 60 * 60;
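+# mtimes are matched both exactly and shifted by this offset, so a backup
+# that spans a DST change does not make every existing file look new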
+
 my $debug = 0;
 $|=1;
 
 my $start_t = time();
 
-my $pidfile = new File::Pid;
+my $pid_path = abs_path($0);
+$pid_path =~ s/\W+/_/g;
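+# e.g. /usr/local/bin/BackupPC_updatedb -> /tmp/_usr_local_bin_BackupPC_updatedb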
+
+my $pidfile = new File::Pid({
+       file => "/tmp/$pid_path",
+});
 
 if (my $pid = $pidfile->running ) {
        die "$0 already running: $pid\n";
@@ -29,8 +38,8 @@ if (my $pid = $pidfile->running ) {
        $pidfile->remove;
 	$pidfile = new File::Pid({ file => "/tmp/$pid_path" });
 }
-$pidfile->write;
 print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
+$pidfile->write;
 
 my $t_fmt = '%Y-%m-%d %H:%M:%S';
 
@@ -50,9 +59,9 @@ my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });
 
 my %opt;
 
-if ( !getopts("cdm:v:ij", \%opt ) ) {
+if ( !getopts("cdm:v:ijf", \%opt ) ) {
        print STDERR <<EOF;
-usage: $0 [-c|-d] [-m num] [-v|-v level] [-i]
+usage: $0 [-c|-d] [-m num] [-v level] [-i|-j|-f]
 
 Options:
        -c      create database on first use
@@ -61,10 +70,14 @@ Options:
        -v num  set verbosity (debug) level (default $debug)
        -i      update Hyper Estraier full text index
        -j      update full text, don't check existing files
+       -f      don't do anything with the full text index
 
 Option -j is a variation on -i. It allows faster initial creation
 of the full-text index from an existing database.
 
+Option -f will create a database that is out of sync with the full-text
+index. You will have to re-run $0 with -i to fix it.
+
 EOF
        exit 1;
 }
@@ -72,6 +85,9 @@ EOF
 if ($opt{v}) {
        print "Debug level at $opt{v}\n";
        $debug = $opt{v};
+} elsif ($opt{f}) {
+       print "WARNING: disabling full-text index update. You need to re-run $0 -j !\n";
+       ($use_hest, $index_path, $index_node_url) = (undef, undef, undef);
 }
 
 #---- subs ----
@@ -111,11 +127,14 @@ sub hest_update {
 
 	my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initial import\n";
 
-       unless ($use_hest) {
+       unless (defined($use_hest)) {
                print STDERR "HyperEstraier support not enabled in configuration\n";
+               $use_hest = 0;
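+               # with $use_hest now 0, every later call bails out at the return below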
                return;
        }
 
+       return unless($use_hest);
+
        print curr_time," updating HyperEstraier:";
 
        my $t = time();
@@ -143,7 +162,7 @@ sub hest_update {
 
                my $where = '';
                my @data;
-               if ($host_id && $share_id && $num) {
+               if (defined($host_id) && defined($share_id) && defined($num)) {
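+                       # defined() rather than truthiness: a backup number of 0 is valid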
                        $where = qq{
                        WHERE
                                hosts.id = ? AND
@@ -209,7 +228,7 @@ sub hest_update {
                        $doc->add_attr('@uri', $uri);
 
                        foreach my $c (@{ $sth->{NAME} }) {
-                               $doc->add_attr($c, $row->{$c}) if ($row->{$c});
+                               $doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
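+                               # (defined() keeps legitimate 0 and empty-string values)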
                        }
 
                        #$doc->add_attr('@cdate', fmt_date($row->{'date'}));
@@ -256,10 +275,10 @@ sub hest_update {
 
 
 ## update index ##
-if (($opt{i} || $opt{j} || ($index_path && ! -e $index_path)) && !$opt{c}) {
+if (($opt{i} || $opt{j} || ($index_path && ! -e $TopDir . $index_path)) && !$opt{c}) {
        # update all
        print "force update of HyperEstraier index ";
-       print "importing existing data" unless (-e $index_path);
+       print "importing existing data" unless (-e $TopDir . $index_path);
        print "by -i flag" if ($opt{i});
        print "by -j flag" if ($opt{j});
        print "\n";
@@ -270,180 +289,127 @@ if (($opt{i} || $opt{j} || ($index_path && ! -e $index_path)) && !$opt{c}) {
 if ($opt{c}) {
        sub do_index {
                my $index = shift || return;
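+               # spec format is table:col[,col...][:unique],
+               # e.g. backup_parts:backup_id,part_nr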
-               my ($table,$col,$unique) = split(/_/, $index);
+               my ($table,$col,$unique) = split(/:/, $index);
                $unique ||= '';
-               $index =~ s/,/_/g;
+               $index =~ s/\W+/_/g;
+               print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
                $dbh->do(qq{ create $unique index $index on $table($col) });
        }
 
        print "creating tables...\n";
-      
-       $dbh->do(qq{
+
+       $dbh->do( qq{
                create table hosts (
                        ID      SERIAL          PRIMARY KEY,
                        name    VARCHAR(30)     NOT NULL,
                        IP      VARCHAR(15)
                );            
-       });
-             
-       $dbh->do(qq{
+
                create table shares (
                        ID      SERIAL          PRIMARY KEY,
                        hostID  INTEGER         NOT NULL references hosts(id),
                        name    VARCHAR(30)     NOT NULL,
-                       share   VARCHAR(200)    NOT NULL,
-                       localpath VARCHAR(200)      
+                       share   VARCHAR(200)    NOT NULL
                );            
-       });
 
-       $dbh->do(qq{
                create table dvds (
                        ID      SERIAL          PRIMARY KEY, 
                        num     INTEGER         NOT NULL,
                        name    VARCHAR(255)    NOT NULL,
                        mjesto  VARCHAR(255)
                );
-       });
-       
-       $dbh->do(qq{
+
                create table backups (
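+                       -- id replaces the old composite PRIMARY KEY(hostID, num, shareID)
+                       -- so that other tables can reference a backup by one column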
+                       id      serial,
                        hostID  INTEGER         NOT NULL references hosts(id),
                        num     INTEGER         NOT NULL,
                        date    integer         NOT NULL, 
                        type    CHAR(4)         not null,
                        shareID integer         not null references shares(id),
                        size    bigint          not null,
-                       PRIMARY KEY(hostID, num, shareID) 
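+                       -- a size of -1 marks a value that has not been calculated yet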
+                       inc_size bigint         not null default -1,
+                       inc_deleted boolean     default false,
+                       parts   integer         not null default 1,
+                       PRIMARY KEY(id)
                );            
-       });
-
-       #do_index('backups_hostid,num_unique');
 
-
-       $dbh->do(qq{     
                create table files (
-                       ID      SERIAL          PRIMARY KEY,  
-                       shareID INTEGER         NOT NULL references shares(id),
-                       backupNum  INTEGER      NOT NULL,
-                       name       VARCHAR(255) NOT NULL,
-                       path       VARCHAR(255) NOT NULL,
-                       date       integer      NOT NULL,
-                       type       INTEGER      NOT NULL,
-                       size       INTEGER      NOT NULL
+                       ID              SERIAL,
+                       shareID         INTEGER NOT NULL references shares(id),
+                       backupNum       INTEGER NOT NULL,
+                       name            VARCHAR(255) NOT NULL,
+                       path            VARCHAR(255) NOT NULL,
+                       date            integer NOT NULL,
+                       type            INTEGER NOT NULL,
+                       size            bigint  NOT NULL,
+                       primary key(id)
                );
-       });
-
 
-       $dbh->do( qq{
-               create table archive
-               (
-                       id                      int not null,
+               create table archive (
+                       id              serial,
                        dvd_nr          int not null,
+                       total_size      bigint default -1,
                        note            text,
                        username        varchar(20) not null,
-                       date            timestamp,
+                       date            timestamp default now(),
                        primary key(id)
                );      
-       }
-       );
 
-       $dbh->do( qq{
-               create table archive_backup
-               (
-                       archive_id      int not null,
-                       backup_id       int not null,
-                       status          text,
+               create table archive_backup (
+                       archive_id      int not null references archive(id) on delete cascade,
+                       backup_id       int not null references backups(id),
                        primary key(archive_id, backup_id)
                );
-       });
-
-       $dbh->do( qq{
-               create table workflows(
-                       id                      int not null,
-                       step_id         int not null,
-                       start           timestamp,
-                       stop            timestamp,
-                       username        varchar(20),
-                       archive_id      int not null,
-                       running         boolean default true,
-                       primary key(id)
-               );
-       });
 
-       $dbh->do( qq{
-               create table workflow_step
-               (
-                       step_id         int not null,
-                       code            text,
-                       next_step       int,
-                       stop            boolean default false,
-                       primary key(step_id)
+               create table archive_burned (
+                       archive_id      int references archive(id),
+                       date            timestamp default now(),
+                       part            int not null default 1,
+                       copy            int not null default 1,
+                       iso_size bigint default -1
                );
-       });
-
-       $dbh->do( qq{
-                       alter table workflow_step
-                               add constraint fk_workflow_next_step
-                               foreign key(next_step)
-                               references workflow_step(step_id);
-       });
-
-       $dbh->do( qq{
-               alter table workflows
-                       add constraint fk_workflows_step_id
-                       foreign key(step_id)
-                       references workflow_step(step_id);
-       });
-       
-       $dbh->do( qq{
-               alter table workflows
-                       add constraint fk_workflows_archive_id
-                       foreign key(archive_id)
-                       references archive(id); 
-       });
 
-       $dbh->do( qq{
-               create table workflow_log
-               (
-                       workflow_id             int not null,
-                       step_id                 int not null,
-                       date                    timestamp not null,
-                       status                  text,
-                       primary key(workflow_id, step_id)
+               create table backup_parts (
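+                       -- per-part metadata (sizes, md5, item count) for backups
+                       -- that are split into more than one part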
+                       id serial,
+                       backup_id int references backups(id),
+                       part_nr int not null check (part_nr > 0),
+                       tar_size bigint not null check (tar_size > 0),
+                       size bigint not null check (size > 0),
+                       md5 text not null,
+                       items int not null check (items > 0),
+                       date timestamp default now(),
+                       primary key(id)
                );
        });
 
-       $dbh->do( qq{
-               alter table workflow_log
-                       add constraint fk_workflow_log_workflow_id
-                       foreign key (workflow_id)
-                       references workflows(id);
-               });
-       
-       $dbh->do( qq{
-               alter table workflow_log
-                       add constraint fk_workflow_log_step_id
-                       foreign key (step_id)
-                       references      workflow_step(step_id);
-               });
-
-       print "creating indexes:";
+       print "creating indexes: ";
 
        foreach my $index (qw(
-               hosts_name
-               backups_hostID
-               backups_num
-               shares_hostID
-               shares_name
-               files_shareID
-               files_path
-               files_name
-               files_date
-               files_size
+               hosts:name
+               backups:hostID
+               backups:num
+               backups:shareID
+               shares:hostID
+               shares:name
+               files:shareID
+               files:path
+               files:name
+               files:date
+               files:size
+               archive:dvd_nr
+               archive_burned:archive_id
+               backup_parts:backup_id,part_nr
        )) {
-               print " $index";
                do_index($index);
        }
+
+       print " creating sequence: ";
+       foreach my $seq (qw/dvd_nr/) {
+               print "$seq ";
+               $dbh->do( qq{ CREATE SEQUENCE $seq } );
+       }
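+       # dvd_nr is a standalone sequence; the archive code is assumed to
+       # allocate DVD numbers from it via nextval('dvd_nr')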
+
+
        print "...\n";
 
        $dbh->commit;
@@ -487,7 +453,12 @@ WHERE hostID=? AND num=? AND shareid=?
 
 $sth->{insert_backups} = $dbh->prepare(qq{
 INSERT INTO backups (hostID, num, date, type, shareid, size)
-VALUES (?,?,?,?,?,?)
+VALUES (?,?,?,?,?,-1)
+});
+
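+# rows are created with size = -1 (see above) and the real size is
+# written back by update_backups_size after the share has been walked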
+$sth->{update_backups_size} = $dbh->prepare(qq{
+UPDATE backups SET size = ?
+WHERE hostID = ? and num = ? and date = ? and type = ? and shareid = ?
 });
 
 $sth->{insert_files} = $dbh->prepare(qq{
@@ -496,7 +467,10 @@ INSERT INTO files
        VALUES (?,?,?,?,?,?,?)
 });
 
-foreach my $host_key (keys %{$hosts}) {
+my @hosts = keys %{$hosts};
+my $host_nr = 0;
+
+foreach my $host_key (@hosts) {
 
        my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";
 
@@ -511,7 +485,9 @@ foreach my $host_key (keys %{$hosts}) {
                $hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
        }
 
-       print "host ".$hosts->{$host_key}->{'host'}.": ";
+       $host_nr++;
+       print "host ", $hosts->{$host_key}->{'host'}, " [", 
+               $host_nr, "/", ($#hosts + 1), "]: ";
  
        # get backups for a host
        my @backups = $bpc->BackupInfoRead($hostname);
@@ -553,19 +529,32 @@ foreach my $host_key (keys %{$hosts}) {
                        # dump some log
                        print curr_time," ", $share;
 
-                       my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
-
                        $sth->{insert_backups}->execute(
                                $hostID,
                                $backupNum,
                                $backup->{'endTime'},
                                substr($backup->{'type'},0,4),
                                $shareID,
-                               $size,
                        );
 
-                       print " commit";
-                       $dbh->commit();
+                       my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);
+
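+                       # set the final size and commit; a failure rolls back this
+                       # share's inserts so the next run can redo it cleanly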
+                       eval {
+                               $sth->{update_backups_size}->execute(
+                                       $size,
+                                       $hostID,
+                                       $backupNum,
+                                       $backup->{'endTime'},
+                                       substr($backup->{'type'},0,4),
+                                       $shareID,
+                               );
+                               print " commit";
+                               $dbh->commit();
+                       };
+                       if ($@) {
+                               print " rollback";
+                               $dbh->rollback();
+                       }
 
                        my $dur = (time() - $t) || 1;
                        printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
@@ -604,14 +593,14 @@ sub getShareID() {
 
        $sth->{insert_share} ||= $dbh->prepare(qq{
                INSERT INTO shares 
-                       (hostID,name,share,localpath
-               VALUES (?,?,?,?)
+                       (hostID,name,share) 
+               VALUES (?,?,?)
        });
 
        my $drop_down = $hostname . '/' . $share;
        $drop_down =~ s#//+#/#g;
 
-       $sth->{insert_share}->execute($hostID,$share, $drop_down ,undef);
+       $sth->{insert_share}->execute($hostID,$share, $drop_down);
        return $dbh->last_insert_id(undef,undef,'shares',undef);
 }
 
@@ -628,12 +617,12 @@ sub found_in_db {
                SELECT 1 FROM files
                WHERE shareID = ? and
                        path = ? and 
-                       date = ? and
-                       size = ?
+                       size = ? and
+                       ( date = ? or date = ? or date = ? )
                LIMIT 1
        });
 
-       my @param = ($shareID,$path,$date,$size);
+       my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
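+       # accept the exact mtime or one shifted +/- 1h by a DST change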
        $sth->{file_in_db}->execute(@param);
        my $rows = $sth->{file_in_db}->rows;
        print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);
@@ -683,8 +672,29 @@ sub recurseDir($$$$$$$$) {
                                $filesInBackup->{$path_key}->{'size'}
                        ));
 
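+                       # two extra keys shifted by the DST offset, so a file seen
+                       # across a clock change is not treated as new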
+                       my $key_dst_prev = join(" ", (
+                               $shareID,
+                               $dir,
+                               $path_key,
+                               $filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
+                               $filesInBackup->{$path_key}->{'size'}
+                       ));
+
+                       my $key_dst_next = join(" ", (
+                               $shareID,
+                               $dir,
+                               $path_key,
+                               $filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
+                               $filesInBackup->{$path_key}->{'size'}
+                       ));
+
                        my $found;
-                       if (! defined($beenThere->{$key}) && ! ($found = found_in_db($key, @data)) ) {
+                       if (
+                               ! defined($beenThere->{$key}) &&
+                               ! defined($beenThere->{$key_dst_prev}) &&
+                               ! defined($beenThere->{$key_dst_next}) &&
+                               ! ($found = found_in_db($key, @data))
+                       ) {
                                print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);
 
                                if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {