begin search integration into version 3.2.0
author Dobrica Pavlinusic <dpavlin@backuppc.(none)>
Mon, 3 Jan 2011 16:49:17 +0000 (16:49 +0000)
committer Dobrica Pavlinusic <dpavlin@backuppc.(none)>
Mon, 3 Jan 2011 16:49:17 +0000 (16:49 +0000)
bin/BackupPC_updatedb [new file with mode: 0755]
lib/BackupPC/SearchLib.pm [new file with mode: 0644]

diff --git a/bin/BackupPC_updatedb b/bin/BackupPC_updatedb
new file mode 100755 (executable)
index 0000000..e72ba3d
--- /dev/null
@@ -0,0 +1,791 @@
#!/usr/bin/perl
#
# BackupPC_updatedb - import BackupPC backup metadata into the search
# database and (optionally) into a Hyper Estraier full-text index.
# A pidfile in /tmp guards against concurrent runs.

use strict;
#use lib "/data/backuppc/lib";
use lib "/usr/share/backuppc/lib";

use DBI;
use BackupPC::Lib;
use BackupPC::View;
use Data::Dumper;
use Getopt::Std;
use Time::HiRes qw/time/;
use File::Pid;
use POSIX qw/strftime/;
use BackupPC::SearchLib;
use Cwd qw/abs_path/;
use Data::Dump qw(dump);

# file-type constant for directories (mirrors BackupPC::Attrib)
use constant BPC_FTYPE_DIR => 5;
# number of database rows handled per full-text indexing batch
use constant EST_CHUNK => 4096;

# daylight saving time change offset for 1h
my $dst_offset = 60 * 60;

my $debug = 0;
$|=1;

my $start_t = time();

# build a pidfile name from the absolute script path so two copies of
# this script from the same installation exclude each other
my $pid_path = abs_path($0);
$pid_path =~ s/\W+/_/g;

my $pidfile = new File::Pid({
       file => "/tmp/$pid_path",
});

if (my $pid = $pidfile->running ) {
       die "$0 already running: $pid\n";
} elsif ($pidfile->pid ne $$) {
       # stale pidfile left by a dead process - replace it with our own
       $pidfile->remove;
       $pidfile = new File::Pid;
}
print STDERR "$0 using pid ",$pidfile->pid," file ",$pidfile->file,"\n";
$pidfile->write;

# timestamp format used for log output
my $t_fmt = '%Y-%m-%d %H:%M:%S';

my $hosts;
my $bpc = BackupPC::Lib->new || die;
my %Conf = $bpc->Conf();
my $TopDir = $bpc->TopDir();
# cache of file keys already seen for the current host (reset per host)
my $beenThere = {};

my $dsn = $Conf{SearchDSN} || die "Need SearchDSN in config.pl\n";
my $user = $Conf{SearchUser} || '';

my $index_node_url = $Conf{HyperEstraierIndex};

# AutoCommit off: inserts are committed per share in the main loop
my $dbh = DBI->connect($dsn, $user, "", { RaiseError => 1, AutoCommit => 0 });

my %opt;

if ( !getopts("cdm:v:ijfq", \%opt ) ) {
       print STDERR <<EOF;
usage: $0 [-c|-d] [-m num] [-v|-v level] [-i|-j|-f]

Options:
       -c      create database on first use
       -d      delete database before import
       -m num  import just num increments for one host
       -v num  set verbosity (debug) level (default $debug)
       -i      update Hyper Estraier full text index
       -j      update full text, don't check existing files
       -f      don't do anything with full text index
       -q      be quiet for hosts without changes

Option -j is variation on -i. It will allow faster initial creation
of full-text index from existing database.

Option -f will create database which is out of sync with full text index. You
will have to re-run $0 with -i to fix it.

EOF
       exit 1;
}

if ($opt{v}) {
       print "Debug level at $opt{v}\n";
       $debug = $opt{v};
} elsif ($opt{f}) {
       # NOTE(review): because this is an elsif, -f is silently ignored
       # whenever -v is also given - confirm whether that is intended
       print "WARNING: disabling full-text index update. You need to re-run $0 -j !\n";
       $index_node_url = undef;
}
+
+#---- subs ----
+
# fmt_time($seconds) - format a duration as "[Hh]MM:SS" for log output.
# Returns undef for zero/undefined durations.
#
# Fix vs original: gmtime() wraps its hour field at 24, so durations of a
# day or more silently lost whole days; fold the day-of-month field
# (1-based) back into the hour count.
sub fmt_time {
       my $t = shift || return;
       my $out = "";
       my ($ss,$mm,$hh,$dd) = gmtime($t);
       $hh += ($dd - 1) * 24;   # $dd is 1 for durations under 24h
       $out .= "${hh}h" if ($hh);
       $out .= sprintf("%02d:%02d", $mm,$ss);
       return $out;
}
+
# curr_time() - current local wallclock as "YYYY-MM-DD HH:MM:SS" ($t_fmt)
sub curr_time {
       return strftime($t_fmt,localtime());
}
+
# cached connection to the Hyper Estraier index node (opened on demand)
my $hest_node;

# hest_update( [$host_id, $share_id, $num] )
#
# Push rows from the "files" table into the Hyper Estraier full-text
# index in batches of EST_CHUNK. With no arguments every row is
# (re)considered; with all three arguments only the files of that one
# host/share/backup are indexed. No-op when $index_node_url is unset or
# not an http:// URL.
sub hest_update {

       my ($host_id, $share_id, $num) = @_;

       # with -j the per-URI existence check is skipped; the && exploits
       # print's true return value, so $skip_check is set only under -j
       # and the warning is emitted as a side effect
       my $skip_check = $opt{j} && print STDERR "Skipping check for existing files -- this should be used only with initital import\n";

       unless ($index_node_url && $index_node_url =~ m#^http://#) {
               print STDERR "HyperEstraier support not enabled or index node invalid\n" if ($debug);
               $index_node_url = 0;
               return;
       }

       print curr_time," updating Hyper Estraier:";

       my $t = time();

       my $offset = 0;
       my $added = 0;

       if ($index_node_url) {
               print " opening index $index_node_url";
               # NOTE(review): Search::Estraier is not use'd in this file -
               # presumably loaded indirectly via BackupPC::SearchLib; verify
               $hest_node ||= Search::Estraier::Node->new(
                       url => $index_node_url,
                       user => 'admin',
                       passwd => 'admin',
                       croak_on_error => 1,
               );
               print " via node URL";
       }

       my $results = 0;

       do {

               my $where = '';
               my @data;
               if (defined($host_id) && defined($share_id) && defined($num)) {
                       $where = qq{
                       WHERE
                               hosts.id = ? AND
                               shares.id = ? AND
                               files.backupnum = ?
                       };
                       @data = ( $host_id, $share_id, $num );
               }

               my $limit = sprintf('LIMIT '.EST_CHUNK.' OFFSET %d', $offset);

               my $sth = $dbh->prepare(qq{
                       SELECT
                               files.id                        AS fid,
                               hosts.name                      AS hname,
                               shares.name                     AS sname,
                               -- shares.share                 AS sharename,
                               files.backupnum                 AS backupnum,
                               -- files.name                   AS filename,
                               files.path                      AS filepath,
                               files.date                      AS date,
                               files.type                      AS type,
                               files.size                      AS size,
                               files.shareid                   AS shareid,
                               backups.date                    AS backup_date
                       FROM files 
                               INNER JOIN shares       ON files.shareID=shares.ID
                               INNER JOIN hosts        ON hosts.ID = shares.hostID
                               INNER JOIN backups      ON backups.num = files.backupNum and backups.hostID = hosts.ID AND backups.shareID = shares.ID
                       $where
                       $limit
               });

               $sth->execute(@data);
               $results = $sth->rows;

               if ($results == 0) {
                       print " - no new files\n";
                       return;
               } else {
                       print "...";
               }

               # although written inside the loop, this is an ordinary
               # package-level sub (compiled once, not per iteration)
               sub fmt_date {
                       my $t = shift || return;
                       # NOTE(review): timeStamp is called here with a single
                       # argument but as BackupPC::Lib::timeStamp(undef, $t)
                       # in SearchLib.pm - one of the two conventions is
                       # likely wrong; confirm against BackupPC::Lib
                       my $iso = BackupPC::Lib::timeStamp($t);
                       $iso =~ s/\s/T/;
                       return $iso;
               }

               while (my $row = $sth->fetchrow_hashref()) {

                       # unique document URI: "host:share#backupnum path"
                       my $uri = $row->{hname} . ':' . $row->{sname} . '#' . $row->{backupnum} . ' ' . $row->{filepath};
                       if (! $skip_check && $hest_node) {
                               my $id = $hest_node->uri_to_id($uri);
                               # NOTE(review): this skips rows whose URI maps
                               # to id -1; confirm uri_to_id's not-found
                               # return value - if -1 means "not indexed
                               # yet" this condition looks inverted
                               next if ($id && $id == -1);
                       }

                       # create a document object 
                       my $doc = Search::Estraier::Document->new;

                       # add attributes to the document object 
                       $doc->add_attr('@uri', $uri);

                       # copy every selected column as a document attribute
                       foreach my $c (@{ $sth->{NAME} }) {
                               print STDERR "attr $c = $row->{$c}\n" if ($debug > 2);
                               $doc->add_attr($c, $row->{$c}) if (defined($row->{$c}));
                       }

                       #$doc->add_attr('@cdate', fmt_date($row->{'date'}));

                       # add the body text to the document object 
                       my $path = $row->{'filepath'};
                       $doc->add_text($path);
                       # also index the path with every character spaced out
                       # so substring-like searches can match
                       $path =~ s/(.)/$1 /g;
                       $doc->add_hidden_text($path);

                       print STDERR $doc->dump_draft,"\n" if ($debug > 1);

                       # register the document object to the database
                       $hest_node->put_doc($doc) if ($hest_node);

                       $added++;
               }

               print "$added";

               $offset += EST_CHUNK;

       # a full chunk means there may be more rows to fetch
       } while ($results == EST_CHUNK);

       my $dur = (time() - $t) || 1;
       printf(" [%.2f/s dur: %s]\n",
               ( $added / $dur ),
               fmt_time($dur)
       );
}
+
+#---- /subs ----
+
+
## update index ##
# -i / -j without -c: refresh the whole full-text index up front
# (with -c the tables are freshly created, so there is nothing to index)
if ( ( $opt{i} || $opt{j} ) && !$opt{c} ) {
       # update all
       print "force update of Hyper Estraier index ";
       print "by -i flag" if ($opt{i});
       print "by -j flag" if ($opt{j});
       print "\n";
       hest_update();
}
+
## create tables ##
if ($opt{c}) {
       # do_index("table:column[,column...][:unique]")
       # creates an index named after the whole spec (non-word chars -> _);
       # a third ":unique" field makes it a UNIQUE index
       sub do_index {
               my $index = shift || return;
               my ($table,$col,$unique) = split(/:/, $index);
               $unique ||= '';
               $index =~ s/\W+/_/g;
               print "$index on $table($col)" . ( $unique ? "u" : "" ) . " ";
               $dbh->do(qq{ create $unique index $index on $table($col) });
       }

       print "creating tables...\n";

       # full schema; SQL dialect is PostgreSQL (SERIAL, plpgsql triggers)
       $dbh->do( qq{
               create table hosts (
                       ID      SERIAL          PRIMARY KEY,
                       name    VARCHAR(30)     NOT NULL,
                       IP      VARCHAR(15)
               );            

               create table shares (
                       ID      SERIAL          PRIMARY KEY,
                       hostID  INTEGER         NOT NULL references hosts(id),
                       name    VARCHAR(30)     NOT NULL,
                       share   VARCHAR(200)    NOT NULL
               );            

               create table dvds (
                       ID      SERIAL          PRIMARY KEY, 
                       num     INTEGER         NOT NULL,
                       name    VARCHAR(255)    NOT NULL,
                       mjesto  VARCHAR(255)
               );

               create table backups (
                       id      serial,
                       hostID  INTEGER         NOT NULL references hosts(id),
                       num     INTEGER         NOT NULL,
                       date    integer         NOT NULL, 
                       type    CHAR(4)         not null,
                       shareID integer         not null references shares(id),
                       size    bigint          not null,
                       inc_size bigint         not null default -1,
                       inc_deleted boolean     default false,
                       parts   integer         not null default 0,
                       PRIMARY KEY(id)
               );            

               create table files (
                       ID              SERIAL,
                       shareID         INTEGER NOT NULL references shares(id),
                       backupNum       INTEGER NOT NULL,
                       name            VARCHAR(255) NOT NULL,
                       path            VARCHAR(255) NOT NULL,
                       date            integer NOT NULL,
                       type            INTEGER NOT NULL,
                       size            bigint  NOT NULL,
                       primary key(id)
               );

               create table archive (
                       id              serial,
                       dvd_nr          int not null,
                       total_size      bigint default -1,
                       note            text,
                       username        varchar(20) not null,
                       date            timestamp default now(),
                       primary key(id)
               );      

               create table archive_backup (
                       archive_id      int not null references archive(id) on delete cascade,
                       backup_id       int not null references backups(id),
                       primary key(archive_id, backup_id)
               );

               create table archive_burned (
                       archive_id      int references archive(id),
                       date            timestamp default now(),
                       part            int not null default 1,
                       copy            int not null default 1,
                       iso_size bigint default -1
               );

               create table backup_parts (
                       id serial,
                       backup_id int references backups(id),
                       part_nr int not null check (part_nr > 0),
                       tar_size bigint not null check (tar_size > 0),
                       size bigint not null check (size > 0),
                       md5 text not null,
                       items int not null check (items > 0),
                       date timestamp default now(),
                       primary key(id)
               );
       });

       print "creating indexes: ";

       # last entry creates a multi-column UNIQUE index (see do_index)
       foreach my $index (qw(
               hosts:name
               backups:hostID
               backups:num
               backups:shareID
               shares:hostID
               shares:name
               files:shareID
               files:path
               files:name
               files:date
               files:size
               archive:dvd_nr
               archive_burned:archive_id
               backup_parts:backup_id,part_nr:unique
       )) {
               do_index($index);
       }

       print " creating sequence: ";
       foreach my $seq (qw/dvd_nr/) {
               print "$seq ";
               $dbh->do( qq{ CREATE SEQUENCE $seq } );
       }

       # consistency triggers keeping backups.parts in sync with the
       # number of rows in backup_parts
       print " creating triggers ";
       $dbh->do( <<__END_OF_TRIGGER__ );

create or replace function backup_parts_check() returns trigger as '
declare
	b_parts integer;
	b_counted integer;
	b_id	integer;
begin
	-- raise notice ''old/new parts %/% backup_id %/%'', old.parts, new.parts, old.id, new.id;
	if (TG_OP=''UPDATE'') then
		b_id := new.id;
		b_parts := new.parts;
	elsif (TG_OP = ''INSERT'') then
		b_id := new.id;
		b_parts := new.parts;
	end if;
	b_counted := (select count(*) from backup_parts where backup_id = b_id);
	-- raise notice ''backup % parts %'', b_id, b_parts;
	if ( b_parts != b_counted ) then
		raise exception ''Update of backup % aborted, requested % parts and there are really % parts'', b_id, b_parts, b_counted;
	end if;
	return null;
end;
' language plpgsql;

create trigger do_backup_parts_check
	after insert or update or delete on backups
	for each row execute procedure backup_parts_check();

create or replace function backup_backup_parts_check() returns trigger as '
declare
	b_id		integer;
	my_part_nr	integer;
	calc_part	integer;
begin
	if (TG_OP = ''INSERT'') then
		-- raise notice ''trigger: % backup_id %'', TG_OP, new.backup_id;
		b_id = new.backup_id;
		my_part_nr = new.part_nr;
		execute ''update backups set parts = parts + 1 where id = '' || b_id;
	elsif (TG_OP = ''DELETE'') then
		-- raise notice ''trigger: % backup_id %'', TG_OP, old.backup_id;
		b_id = old.backup_id;
		my_part_nr = old.part_nr;
		execute ''update backups set parts = parts - 1 where id = '' || b_id;
	end if;
	calc_part := (select count(part_nr) from backup_parts where backup_id = b_id);
	if ( my_part_nr != calc_part ) then
		raise exception ''Update of backup_parts with backup_id % aborted, requested part_nr is % and calulated next is %'', b_id, my_part_nr, calc_part;
	end if;
	return null;
end;
' language plpgsql;

create trigger do_backup_backup_parts_check
	after insert or update or delete on backup_parts
	for each row execute procedure backup_backup_parts_check();

__END_OF_TRIGGER__

       print "...\n";

       $dbh->commit;

}
+
## delete data before inserting ##
if ($opt{d}) {
       print "deleting ";
       # children first so foreign-key constraints are not violated
       foreach my $table (qw(files dvds backups shares hosts)) {
               print "$table ";
               $dbh->do(qq{ DELETE FROM $table });
       }
       print " done...\n";

       $dbh->commit;
}
+
## insert new values ##

# get hosts
$hosts = $bpc->HostInfoRead();
my $hostID;
my $shareID;

# $sth holds named prepared statements in an (autovivified) hash ref
my $sth;

$sth->{insert_hosts} = $dbh->prepare(qq{
INSERT INTO hosts (name, IP) VALUES (?,?)
});

$sth->{hosts_by_name} = $dbh->prepare(qq{
SELECT ID FROM hosts WHERE name=?
});

# does this host/backup/share combination already exist?
$sth->{backups_count} = $dbh->prepare(qq{
SELECT COUNT(*)
FROM backups
WHERE hostID=? AND num=? AND shareid=?
});

# size is inserted as -1 and corrected by update_backups_size afterwards
$sth->{insert_backups} = $dbh->prepare(qq{
INSERT INTO backups (hostID, num, date, type, shareid, size)
VALUES (?,?,?,?,?,-1)
});

$sth->{update_backups_size} = $dbh->prepare(qq{
UPDATE backups SET size = ?
WHERE hostID = ? and num = ? and date = ? and type =? and shareid = ?
});

$sth->{insert_files} = $dbh->prepare(qq{
INSERT INTO files
	(shareID, backupNum, name, path, date, type, size)
	VALUES (?,?,?,?,?,?,?)
});

my @hosts = keys %{$hosts};
my $host_nr = 0;
+
# walk every configured host and import any backups not yet in the DB
foreach my $host_key (@hosts) {

       my $hostname = $hosts->{$host_key}->{'host'} || die "can't find host for $host_key";

       $sth->{hosts_by_name}->execute($hosts->{$host_key}->{'host'});

       # create the hosts row on first encounter
       unless (($hostID) = $sth->{hosts_by_name}->fetchrow_array()) {
               $sth->{insert_hosts}->execute(
                       $hosts->{$host_key}->{'host'},
                       $hosts->{$host_key}->{'ip'}
               );

               $hostID = $dbh->last_insert_id(undef,undef,'hosts',undef);
       }

       $host_nr++;
       # get backups for a host
       my @backups = $bpc->BackupInfoRead($hostname);
# NOTE(review): debug leftover - unconditionally dumps every host's backup
# list to STDERR; consider removing or guarding with $debug
warn "XXXX ",dump(@backups);
       my $incs = scalar @backups;

       my $host_header = sprintf("host %s [%d/%d]: %d increments\n",
               $hosts->{$host_key}->{'host'},
               $host_nr,
               ($#hosts + 1),
               $incs
       );
       print $host_header unless ($opt{q});
       my $inc_nr = 0;
       # reset the seen-file cache for each host
       $beenThere = {};

       foreach my $backup (@backups) {

               $inc_nr++;
               # honour -m: import at most num increments per host
               last if ($opt{m} && $inc_nr > $opt{m});

               my $backupNum = $backup->{'num'};
               my @backupShares = ();

               my $share_header = sprintf("%-10s %2d/%-2d #%-2d %s %5s/%5s files (date: %s dur: %s)\n", 
                       $hosts->{$host_key}->{'host'},
                       $inc_nr, $incs, $backupNum, 
                       $backup->{type} || '?',
                       $backup->{nFilesNew} || '?', $backup->{nFiles} || '?',
                       strftime($t_fmt,localtime($backup->{startTime})),
                       fmt_time($backup->{endTime} - $backup->{startTime})
               );
               print $share_header unless ($opt{q});

               my $files = BackupPC::View->new($bpc, $hostname, \@backups, 1);
               foreach my $share ($files->shareList($backupNum)) {

                       my $t = time();

                       $shareID = getShareID($share, $hostID, $hostname);

                       $sth->{backups_count}->execute($hostID, $backupNum, $shareID);
                       my ($count) = $sth->{backups_count}->fetchrow_array();
                       # skip if already in database!
                       next if ($count > 0);

                       # dump host and share header for -q
                       if ($opt{q}) {
                               if ($host_header) {
                                       print $host_header;
                                       # print the host header only once
                                       $host_header = undef;
                               }
                               print $share_header;
                       }

                       # dump some log
                       print curr_time," ", $share;

                       # size is a -1 placeholder, corrected after recursion
                       $sth->{insert_backups}->execute(
                               $hostID,
                               $backupNum,
                               $backup->{'endTime'},
                               substr($backup->{'type'},0,4),
                               $shareID,
                       );

                       my ($f, $nf, $d, $nd, $size) = recurseDir($bpc, $hostname, $files, $backupNum, $share, "", $shareID);

                       # commit the whole share atomically; roll back on error
                       eval {
                               $sth->{update_backups_size}->execute(
                                       $size,
                                       $hostID,
                                       $backupNum,
                                       $backup->{'endTime'},
                                       substr($backup->{'type'},0,4),
                                       $shareID,
                               );
                               print " commit";
                               $dbh->commit();
                       };
                       if ($@) {
                               print " rollback";
                               $dbh->rollback();
                       }

                       my $dur = (time() - $t) || 1;
                       printf(" %d/%d files %d/%d dirs %0.2f MB [%.2f/s dur: %s]\n",
                               $nf, $f, $nd, $d,
                               ($size / 1024 / 1024),
                               ( ($f+$d) / $dur ),
                               fmt_time($dur)
                       );

                       # index the freshly inserted rows right away
                       hest_update($hostID, $shareID, $backupNum) if ($nf + $nd > 0);
               }

       }
}
# release statement handles before disconnecting
undef $sth;
$dbh->commit();
$dbh->disconnect();

print "total duration: ",fmt_time(time() - $start_t),"\n";

$pidfile->remove;
+
# getShareID($share, $hostID, $hostname)
#
# Return shares.id for the given host/share pair, inserting a new shares
# row (with a "hostname/share" drop-down label) on first sight.
#
# Fix vs original: removed the bogus empty () prototype - it declared a
# zero-argument sub although three arguments are always passed; it only
# "worked" because every call site precedes this definition, so the
# prototype was never enforced.
sub getShareID {

       my ($share, $hostID, $hostname) = @_;

       $sth->{share_id} ||= $dbh->prepare(qq{
               SELECT ID FROM shares WHERE hostID=? AND name=?
       });

       $sth->{share_id}->execute($hostID,$share);

       my ($id) = $sth->{share_id}->fetchrow_array();

       return $id if (defined($id));

       $sth->{insert_share} ||= $dbh->prepare(qq{
               INSERT INTO shares 
                       (hostID,name,share) 
               VALUES (?,?,?)
       });

       # label shown in the web interface drop-down, e.g. "host/share",
       # with duplicate slashes collapsed
       my $drop_down = $hostname . '/' . $share;
       $drop_down =~ s#//+#/#g;

       $sth->{insert_share}->execute($hostID,$share, $drop_down);
       return $dbh->last_insert_id(undef,undef,'shares',undef);
}
+
# found_in_db($key, @insert_args)
#
# Check whether the file described by @insert_args already exists in the
# files table, tolerating an mtime shifted by +/- one hour (DST change).
# Despite the name this has side effects: it marks $key as seen in
# $beenThere and, when the row is missing, INSERTs it via insert_files.
# Returns a true row count when the file was already present.
sub found_in_db {

       my @data = @_;
       # @data is now exactly the bind list for insert_files
       shift @data;

       # re-unpack from @_ (including the leading $key) for the check below
       my ($key, $shareID,undef,$name,$path,$date,undef,$size) = @_;

       return $beenThere->{$key} if (defined($beenThere->{$key}));

       $sth->{file_in_db} ||= $dbh->prepare(qq{
               SELECT 1 FROM files
               WHERE shareID = ? and
                       path = ? and 
                       size = ? and
                       ( date = ? or date = ? or date = ? )
               LIMIT 1
       });

       my @param = ($shareID,$path,$size,$date, $date-$dst_offset, $date+$dst_offset);
       $sth->{file_in_db}->execute(@param);
       # NOTE(review): DBI documents rows() as unreliable for SELECT before
       # the rows are fetched; this appears to rely on driver behaviour
       # (e.g. DBD::Pg) - verify
       my $rows = $sth->{file_in_db}->rows;
       print STDERR "## found_in_db($shareID,$path,$date,$size) ",( $rows ? '+' : '-' ), join(" ",@param), "\n" if ($debug >= 3);

       $beenThere->{$key}++;

       $sth->{'insert_files'}->execute(@data) unless ($rows);
       return $rows;
}
+
####################################################
# recurse through the filesystem structure of one  #
# backup share, inserting files not yet in the DB  #
####################################################
# recurseDir($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID)
#
# Walks one directory level, queues subdirectories on a local stack and
# recurses into them. Returns ($nr_files, $new_files, $nr_dirs,
# $new_dirs, $size) where $size sums only newly seen entries.
#
# Fixes vs original: dropped the ($$$$$$$$) prototype (eight scalars
# declared, seven arguments passed - including the compile-time-checked
# recursive call below); the descent loop now tests @stack instead of
# the shifted value, so a directory literally named "0" no longer
# terminates the walk early.
sub recurseDir {

       my ($bpc, $hostname, $files, $backupNum, $share, $dir, $shareID) = @_;

       print STDERR "\nrecurse($hostname,$backupNum,$share,$dir,$shareID)\n" if ($debug >= 1);

       my ($nr_files, $new_files, $nr_dirs, $new_dirs, $size) = (0,0,0,0,0);

       { # scope
               my @stack;

               print STDERR "# dirAttrib($backupNum, $share, $dir)\n" if ($debug >= 2);
               my $filesInBackup = $files->dirAttrib($backupNum, $share, $dir);

               # first, add all the entries in current directory
               foreach my $path_key (keys %{$filesInBackup}) {
                       print STDERR "# file ",Dumper($filesInBackup->{$path_key}),"\n" if ($debug >= 3);
                       # bind list for the insert_files statement
                       my @data = (
                               $shareID,
                               $backupNum,
                               $path_key,
                               $filesInBackup->{$path_key}->{'relPath'},
                               $filesInBackup->{$path_key}->{'mtime'},
                               $filesInBackup->{$path_key}->{'type'},
                               $filesInBackup->{$path_key}->{'size'}
                       );

                       my $key = join(" ", (
                               $shareID,
                               $dir,
                               $path_key,
                               $filesInBackup->{$path_key}->{'mtime'},
                               $filesInBackup->{$path_key}->{'size'}
                       ));

                       # the same entry may have been recorded with its mtime
                       # shifted by one hour around a DST change
                       my $key_dst_prev = join(" ", (
                               $shareID,
                               $dir,
                               $path_key,
                               $filesInBackup->{$path_key}->{'mtime'} - $dst_offset,
                               $filesInBackup->{$path_key}->{'size'}
                       ));

                       my $key_dst_next = join(" ", (
                               $shareID,
                               $dir,
                               $path_key,
                               $filesInBackup->{$path_key}->{'mtime'} + $dst_offset,
                               $filesInBackup->{$path_key}->{'size'}
                       ));

                       my $found;
                       if (
                               ! defined($beenThere->{$key}) &&
                               ! defined($beenThere->{$key_dst_prev}) &&
                               ! defined($beenThere->{$key_dst_next}) &&
                               ! ($found = found_in_db($key, @data))
                       ) {
                               print STDERR "# key: $key [", $beenThere->{$key},"]" if ($debug >= 2);

                               if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
                                       $new_dirs++ unless ($found);
                                       print STDERR " dir\n" if ($debug >= 2);
                               } else {
                                       $new_files++ unless ($found);
                                       print STDERR " file\n" if ($debug >= 2);
                               }
                               $size += $filesInBackup->{$path_key}->{'size'} || 0;
                       }

                       if ($filesInBackup->{$path_key}->{'type'} == BPC_FTYPE_DIR) {
                               $nr_dirs++;

                               # queue the subdirectory for the descent below
                               my $full_path = $dir . '/' . $path_key;
                               push @stack, $full_path;
                               print STDERR "### store to stack: $full_path\n" if ($debug >= 3);

                       } else {
                               $nr_files++;
                       }
               }

               print STDERR "## STACK ",join(", ", @stack),"\n" if ($debug >= 2);

               # descend into queued subdirectories and accumulate counters
               while ( @stack ) {
                       my $subdir = shift @stack;
                       my ($f,$nf,$d,$nd, $s) = recurseDir($bpc, $hostname, $files, $backupNum, $share, $subdir, $shareID);
                       print STDERR "# $subdir f: $f nf: $nf d: $d nd: $nd\n" if ($debug >= 1);
                       $nr_files += $f;
                       $new_files += $nf;
                       $nr_dirs += $d;
                       $new_dirs += $nd;
                       $size += $s;
               }
       }

       return ($nr_files, $new_files, $nr_dirs, $new_dirs, $size);
}
+
diff --git a/lib/BackupPC/SearchLib.pm b/lib/BackupPC/SearchLib.pm
new file mode 100644 (file)
index 0000000..03d18c0
--- /dev/null
@@ -0,0 +1,1199 @@
+#!/usr/bin/perl
+package BackupPC::SearchLib;
+
+use strict;
+use BackupPC::CGI::Lib qw(:all);
+use BackupPC::Attrib qw(:all);
+use DBI;
+use DateTime;
+use vars qw(%In $MyURL);
+use Time::HiRes qw/time/;
+use XML::Writer;
+use IO::File;
+
+# --- module-level state ---------------------------------------------------
+# NOTE(review): %Conf is read here at module load time; this assumes the
+# BackupPC configuration is already populated when SearchLib is compiled --
+# confirm against the CGI startup order.
+my $on_page = 100;
+my $pager_pages = 10;
+
+# database connection settings from config.pl
+my $dsn = $Conf{SearchDSN};
+my $db_user = $Conf{SearchUser} || '';
+
+# Hyper Estraier full-text index node URL (empty/undef disables it)
+my $hest_node_url = $Conf{HyperEstraierIndex};
+
+# shared lazy database handle; always access through get_dbh()
+my $dbh;
+
+
+# Return the shared database handle, connecting lazily on first use.
+# Note: AutoCommit is on here (the CGI side), unlike the indexer which
+# manages its own transactions.
+sub get_dbh {
+       unless ( defined $dbh ) {
+               $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } );
+       }
+       return $dbh;
+}
+
+# Return a list of hashrefs ({ id, share }) for every host:share pair,
+# ordered by display name, preceded by a dummy "any" entry with empty id.
+sub getUnits() {
+       my $dbh = get_dbh();
+       my $sth = $dbh->prepare(qq{
+               SELECT
+                       shares.id       as id,
+                       hosts.name || ':' || shares.name as share
+               FROM shares
+               JOIN hosts on hostid = hosts.id
+               ORDER BY share
+       } );
+       $sth->execute();
+
+       # leading dummy entry means "any share" in the search form
+       my @units = ( { 'id' => '', 'share' => '-' } );
+       while ( my $row = $sth->fetchrow_hashref() ) {
+               push @units, $row;
+       }
+       return @units;
+}
+
+# Format an epoch timestamp through BackupPC's timeStamp helper, then
+# replace every whitespace character with &nbsp; so dates never wrap
+# inside HTML table cells.  Returns nothing for a false/missing epoch.
+sub epoch_to_iso {
+       my ($epoch) = @_;
+       return unless $epoch;
+
+       ( my $stamp = BackupPC::Lib::timeStamp(undef, $epoch) ) =~ s/\s/&nbsp;/g;
+       return $stamp;
+}
+
+# Convert the date fields of a search form into four epoch values:
+# (backup_from, backup_to, files_from, files_to).  Each element is undef
+# when the corresponding *_year_* field is absent; missing month/day
+# fields default to the start (from) or end (to) of the range.
+sub dates_from_form($) {
+       my $param = shift || return;
+
+       # Must be an anonymous sub: a *named* sub nested here would be
+       # compiled once and would not stay shared with $param between
+       # calls ("Variable will not stay shared").
+       my $mk_epoch_date = sub {
+               my ($name,$suffix) = @_;
+
+               my $yyyy = $param->{ $name . '_year_' . $suffix} || return undef;
+               my $mm = $param->{ $name . '_month_' . $suffix} ||
+                       ( $suffix eq 'from' ? 1 : 12);
+               my $dd = $param->{ $name . '_day_' . $suffix} ||
+                       ( $suffix eq 'from' ? 1 : 31);
+
+               # keep digits only -- form values may carry stray markup
+               $yyyy =~ s/\D//g;
+               $mm =~ s/\D//g;
+               $dd =~ s/\D//g;
+
+               # "to" dates extend to the last second of the day
+               my $h = my $m = my $s = 0;
+               if ($suffix eq 'to') {
+                       $h = 23;
+                       $m = 59;
+                       $s = 59;
+               }
+
+               # clamp the day so the defaulted 31 doesn't make DateTime
+               # die for short months (e.g. February)
+               my $last_day = DateTime->last_day_of_month(
+                       year => $yyyy,
+                       month => $mm,
+               )->day;
+               $dd = $last_day if ($dd > $last_day);
+
+               my $dt = DateTime->new(
+                       year => $yyyy,
+                       month => $mm,
+                       day => $dd,
+                       hour => $h,
+                       minute => $m,
+                       second => $s,
+               );
+               print STDERR "mk_epoch_date($name,$suffix) [$yyyy-$mm-$dd] = " . $dt->ymd . " " . $dt->hms . "\n";
+               return $dt->epoch || 'NULL';
+       };
+
+       return (
+               $mk_epoch_date->('search_backup', 'from'),
+               $mk_epoch_date->('search_backup', 'to'),
+               $mk_epoch_date->('search', 'from'),
+               $mk_epoch_date->('search', 'to'),
+       );
+
+}
+
+
+# Build the WHERE clause body (without the leading "WHERE") for a file
+# search from the submitted form parameters.  Returns the conditions
+# joined with "and", or an empty string when unconstrained.
+sub getWhere($) {
+       my $param = shift || return;
+
+       my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param);
+
+       my @conditions;
+       push @conditions, qq{ backups.date >= $backup_from } if ($backup_from);
+       push @conditions, qq{ backups.date <= $backup_to } if ($backup_to);
+       push @conditions, qq{ files.date >= $files_from } if ($files_from);
+       push @conditions, qq{ files.date <= $files_to } if ($files_to);
+
+       print STDERR "backup: $backup_from - $backup_to files: $files_from - $files_to cond:" . join(" and ",@conditions) . "\n";
+
+       # share id comes from a select box, so force it to be numeric
+       # before interpolating it into SQL
+       if ( my $share = $param->{'search_share'} ) {
+               $share =~ s/\D//g;
+               push @conditions, ' files.shareid = ' . $share if ($share ne '');
+       }
+
+       # escape single quotes so user input cannot break out of the SQL
+       # string literal (bind parameters would be better still)
+       if ( my $filename = $param->{'search_filename'} ) {
+               $filename =~ s/'/''/g;
+               push @conditions, " upper(files.path) LIKE upper('%".$filename."%')";
+       }
+
+       if ( $param->{burned} ) {
+               # 'burned' means both part and copy recorded; anything else
+               # selects backups with no burn record at all
+               my $is_what = 'is null';
+               $is_what = '= 1' if ($param->{burned} eq 'burned');
+               push @conditions, "archive_burned.part $is_what";
+               push @conditions, "archive_burned.copy $is_what";
+       }
+
+       return join(" and ", @conditions);
+}
+
+# Sort-order lookup table.  For each page ('search' results, 'burn' media
+# list) it maps a UI sort key of the form <column>_<a|d> to the matching
+# SQL ORDER BY fragment ('sql') or Hyper Estraier order expression
+# ('est'); 'default' names the key used when none (or an unknown one) is
+# supplied.  Consumed exclusively through getSort().
+my $sort_def = {
+       search => {
+               default => 'date_a',
+               sql => {
+                       share_d => 'shares.name DESC',
+                       share_a => 'shares.name ASC',
+                       path_d => 'files.path DESC',
+                       path_a => 'files.path ASC',
+                       num_d => 'files.backupnum DESC',
+                       num_a => 'files.backupnum ASC',
+                       size_d => 'files.size DESC',
+                       size_a => 'files.size ASC',
+                       date_d => 'files.date DESC',
+                       date_a => 'files.date ASC',
+               },
+               est => {
+                       share_d => 'sname STRD',
+                       share_a => 'sname STRA',
+                       path_d => 'filepath STRD',
+                       path_a => 'filepath STRA',
+                       num_d => 'backupnum NUMD',
+                       num_a => 'backupnum NUMA',
+                       size_d => 'size NUMD',
+                       size_a => 'size NUMA',
+                       date_d => 'date NUMD',
+                       date_a => 'date NUMA',
+               }
+       }, burn => {
+               default => 'date_a',
+               sql => {
+                       share_d => 'host DESC, share DESC',
+                       share_a => 'host ASC, share ASC',
+                       num_d => 'backupnum DESC',
+                       num_a => 'backupnum ASC',
+                       date_d => 'date DESC',
+                       date_a => 'date ASC',
+                       age_d => 'age DESC',
+                       age_a => 'age ASC',
+                       size_d => 'size DESC',
+                       size_a => 'size ASC',
+                       incsize_d => 'inc_size DESC',
+                       incsize_a => 'inc_size ASC',
+               }
+       }
+};
+
+# Resolve a (page, backend, order) triple against $sort_def.  Dies for an
+# unknown page or backend; an unknown or empty sort order falls back to
+# the page's default.
+sub getSort($$$) {
+       my ($part,$type, $sort_order) = @_;
+
+       my $part_def = $sort_def->{$part} or die "unknown part: $part";
+       my $type_def = $part_def->{$type} or die "unknown type: $type";
+
+       $sort_order ||= $part_def->{'default'};
+
+       # unknown keys fall back to the default sort order
+       return $type_def->{$sort_order}
+               || $type_def->{ $part_def->{'default'} };
+}
+
+# Run a file search against the SQL database.
+# $param is the parsed form: offset (page number), sort, and the fields
+# consumed by getWhere().  Returns (total_match_count, \@rows) where each
+# row has fid/hname/sname/backupnum/filepath/date/type/size.
+sub getFiles($) {
+       my ($param) = @_;
+
+       # offset arrives as a page number; convert to a row offset
+       my $offset = $param->{'offset'} || 0;
+       $offset *= $on_page;
+
+       my $dbh = get_dbh();
+
+       my $sql_cols = qq{
+               files.id                        AS fid,
+               hosts.name                      AS hname,
+               shares.name                     AS sname,
+               files.backupnum                 AS backupnum,
+               files.path                      AS filepath,
+               files.date                      AS date,
+               files.type                      AS type,
+               files.size                      AS size
+       };
+
+       my $sql_from = qq{
+               FROM files 
+                       INNER JOIN shares       ON files.shareID=shares.ID
+                       INNER JOIN hosts        ON hosts.ID = shares.hostID
+                       INNER JOIN backups      ON backups.num = files.backupnum and backups.hostID = hosts.ID AND backups.shareID = files.shareID
+       };
+
+       my $sql_where;
+       my $where = getWhere($param);
+       $sql_where = " WHERE ". $where if ($where);
+
+       # do we have to add tables for burned media?
+       if ( $param->{burned} ) {
+               $sql_from .= qq{
+                       LEFT OUTER JOIN archive_backup on archive_backup.backup_id = backups.id
+                       LEFT OUTER JOIN archive_burned on archive_burned.archive_id = archive_backup.archive_id
+               };
+       }
+
+       my $order = getSort('search', 'sql', $param->{'sort'});
+
+       my $sql_order = qq{
+               ORDER BY $order
+               LIMIT $on_page
+               OFFSET ?
+       };
+
+       # same FROM/WHERE for both queries so the count matches the page
+       my $sql_count = qq{ select count(files.id) $sql_from $sql_where };
+       my $sql_results = qq{ select $sql_cols $sql_from $sql_where $sql_order };
+
+       my $sth = $dbh->prepare($sql_count);
+       $sth->execute();
+       my ($results) = $sth->fetchrow_array();
+
+       $sth = $dbh->prepare($sql_results);
+       $sth->execute( $offset );
+
+       # sanity check: the count query and the page query should agree
+       # (NOTE(review): ->rows is the page size, capped at LIMIT, so this
+       # warning also fires for any result set larger than one page)
+       if ($sth->rows != $results) {
+               my $bug = "$0 BUG: [[ $sql_count ]] = $results while [[ $sql_results ]] = " . $sth->rows;
+               $bug =~ s/\s+/ /gs;
+               print STDERR "$bug\n";
+       }
+
+       my @ret;
+      
+       while (my $row = $sth->fetchrow_hashref()) {
+               push @ret, $row;
+       }
+     
+       $sth->finish();
+       return ($results, \@ret);
+}
+
+# Validate the configured Hyper Estraier index setting.  Returns the node
+# URL, or nothing when full-text indexing is disabled.  Legacy settings
+# that point directly at an on-disk database are rejected: only estmaster
+# node URLs are supported.
+sub getHyperEstraier_url($) {
+       my ($use_hest) = @_;
+
+       return unless $use_hest;
+
+       # compile-time load also makes Search::Estraier available to
+       # getFilesHyperEstraier, which uses it without its own `use`
+       use Search::Estraier 0.04;
+       die "direct access to Hyper Estraier database is no longer supported. Please use estmaster\n"
+               unless ($use_hest =~ m#^http://#);
+
+       return $use_hest;
+}
+
+# Run a file search against the Hyper Estraier full-text index node.
+# Takes the same $param hash as getFiles() and returns the same shape:
+# (total_hits, \@rows) with fid/hname/sname/backupnum/filepath/date/type/
+# size attributes copied from each matching document.
+sub getFilesHyperEstraier($) {
+       my ($param) = @_;
+
+       # offset is a page number; convert to a document offset
+       my $offset = $param->{'offset'} || 0;
+       $offset *= $on_page;
+
+       die "no Hyper Estraier node URL?" unless ($hest_node_url);
+
+       # open the database
+       # (the else branches below are dead while only node access is
+       # supported; kept as guards for a future direct-db mode)
+       my $db;
+       if ($hest_node_url) {
+               $db ||= Search::Estraier::Node->new($hest_node_url);
+               $db->set_auth('admin', 'admin');
+       } else {
+               die "BUG: unimplemented";
+       }
+
+       # create a search condition object
+       my $cond = Search::Estraier::Condition->new();
+
+       my $q = $param->{'search_filename'};
+       my $shareid = $param->{'search_share'};
+
+       if (length($q) > 0) {
+               # exact match
+               $cond->add_attr("filepath ISTRINC $q");
+
+               # space out the characters so the phrase matches the
+               # per-character tokenization used at index time
+               $q =~ s/(.)/$1 /g;
+               # set the search phrase to the search condition object
+               $cond->set_phrase($q);
+       }
+
+       my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param);
+
+       $cond->add_attr("backup_date NUMGE $backup_from") if ($backup_from);
+       $cond->add_attr("backup_date NUMLE $backup_to") if ($backup_to);
+
+       $cond->add_attr("date NUMGE $files_from") if ($files_from);
+       $cond->add_attr("date NUMLE $files_to") if ($files_to);
+
+       $cond->add_attr("shareid NUMEQ $shareid") if ($shareid);
+
+       # fetch enough documents to cover the requested page
+       $cond->set_max( $offset + $on_page );
+       $cond->set_options( 'SURE' );
+       $cond->set_order( getSort('search', 'est', $param->{'sort'} ) );
+
+       # get the result of search
+       my @res;
+       my ($result, $hits);
+
+       if ($hest_node_url) {
+               $result = $db->search($cond, 0);
+               if ($result) {
+                       $hits = $result->hits;
+               } else {
+                       # node unreachable or query failed: report no hits
+                       $hits = 0;
+                       return ($hits,[]);
+               }
+       } else {
+               die "BUG: unimplemented";
+       }
+
+       # for each document in result
+       for my $i ($offset .. ($offset + $on_page - 1)) {
+               last if ($i >= $result->doc_num);
+
+               my $doc;
+               if ($hest_node_url) {
+                       $doc = $result->get_doc($i);
+               } else {
+                       die "BUG: unimplemented";
+               }
+
+               # copy the attributes getFiles() would have selected
+               my $row;
+               foreach my $c (qw/fid hname sname backupnum filepath date type size/) {
+                       $row->{$c} = $doc->attr($c);
+               }
+               push @res, $row;
+       }
+
+       return ($hits, \@res);
+}
+
+# Expand $Conf{GzipSchema} into an archive dump name: \h is replaced by
+# the host, \s by the share (with '/' flattened to '_') and \n by the
+# backup number.  Runs of underscores are collapsed to a single one.
+sub getGzipName($$$)
+{
+       my ($host, $share, $backupnum) = @_;
+
+       ( my $flat_share = $share ) =~ s/\//_/g;
+
+       my $name = $Conf{GzipSchema};
+       $name =~ s/\\h/$host/ge;
+       $name =~ s/\\s/$flat_share/ge;
+       $name =~ s/\\n/$backupnum/ge;
+       $name =~ s/__+/_/g;
+
+       return $name;
+}
+
+# Return the size in bytes of the pre-generated tar.gz dump with the
+# given name: the size of <name>.tar.gz for a single-file dump, the sum
+# of all parts for a split (directory) dump, or -1 when no dump exists.
+sub get_tgz_size_by_name($) {
+       my $name = shift;
+
+       my $tgz = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}.'/'.$name;
+
+       if (-f "${tgz}.tar.gz") {
+               # single-file dump
+               return (stat("${tgz}.tar.gz"))[7];
+       } elsif (-d $tgz) {
+               # split dump: sum every part, skipping dot-files and the
+               # md5 checksum files
+               opendir(my $dir, $tgz) || die "can't opendir $tgz: $!";
+               my @parts = grep { !/^\./ && !/md5/ && -f "$tgz/$_" } readdir($dir);
+               closedir $dir;
+
+               my $size = 0;
+               foreach my $part (@parts) {
+                       # die only when stat itself fails -- a zero-length
+                       # part is legal and must not abort the sum
+                       my @st = stat("$tgz/$part")
+                               or die "can't stat $tgz/$part: $!";
+                       $size += $st[7];
+               }
+               return $size;
+       }
+
+       return -1;
+}
+
+# Return the tar.gz dump size for the backup with database id $backupID,
+# by resolving it to (host, share, backup number) and delegating to
+# getGzipName()/get_tgz_size_by_name().  Returns -1 when no dump exists.
+sub getGzipSizeFromBackupID($) {
+       my ($backupID) = @_;
+       my $dbh = get_dbh();
+       my $sql = q{
+                               SELECT hosts.name  as host,
+                                          shares.name as share,
+                                          backups.num as backupnum
+                               FROM hosts, backups, shares
+                               WHERE shares.id=backups.shareid AND
+                                         hosts.id =backups.hostid AND
+                                         backups.id = ?
+       };
+       my $sth = $dbh->prepare($sql);
+       $sth->execute($backupID);
+       my $row = $sth->fetchrow_hashref();
+
+       return get_tgz_size_by_name(
+               getGzipName($row->{'host'}, $row->{share}, $row->{backupnum})
+       );
+}
+
+# Same as getGzipSizeFromBackupID(), but keyed by (host id, backup
+# number) instead of the backups row id.  Returns the dump size in
+# bytes, or -1 when no dump exists.
+sub getGzipSize($$)
+{
+       my ($hostID, $backupNum) = @_;
+       my $sql;
+       my $dbh = get_dbh();
+       
+       $sql = q{ 
+                               SELECT hosts.name  as host,
+                                          shares.name as share,
+                                          backups.num as backupnum
+                               FROM hosts, backups, shares
+                               WHERE shares.id=backups.shareid AND
+                                         hosts.id =backups.hostid AND
+                                         hosts.id=? AND
+                                         backups.num=?
+                       };
+       my $sth = $dbh->prepare($sql);
+       $sth->execute($hostID, $backupNum);
+
+       my $row = $sth->fetchrow_hashref();
+
+       return get_tgz_size_by_name(
+               getGzipName($row->{'host'}, $row->{share}, $row->{'backupnum'})
+       );
+}
+
+# Estimate how many archive media the parts of backup $id will occupy,
+# packing the parts in part_nr order against $Conf{MaxArchiveSize}.
+# Returns (volume_count, bytes_accumulated_on_last_volume).
+sub getVolumes($) {
+       my $id = shift;
+
+       my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
+
+       # use the shared handle helper like every other query in this
+       # module -- the file-scoped $dbh may still be unconnected here
+       my $dbh = get_dbh();
+
+       my $sth = $dbh->prepare(qq{
+               select
+                       size
+               from backup_parts
+               where backup_id = ?
+               order by part_nr asc
+       });
+
+       $sth->execute($id);
+
+       my $cumulative_size = 0;
+       my $volumes = 1;
+
+       while(my ($size) = $sth->fetchrow_array) {
+               # start a new volume when this part would overflow the
+               # current one
+               if ($cumulative_size + $size > $max_archive_size) {
+                       $volumes++;
+                       $cumulative_size = $size;
+               } else {
+                       $cumulative_size += $size;
+               }
+       }
+
+       return ($volumes,$cumulative_size);
+}
+
+# Return a list of hashrefs describing every backup that has an archive
+# dump (inc_size > 0, parts > 0, not marked deleted) but is not yet
+# referenced by any archive.  Rows are ordered per the 'burn' page sort
+# parameter and augmented with presentation fields: age in days, size in
+# Mb, rounded inc_size, and volume estimates for oversized backups.
+sub getBackupsNotBurned($) {
+
+       my $param = shift;
+       my $dbh = get_dbh();
+
+       my $order = getSort('burn', 'sql', $param->{'sort'});
+
+# debug: record which sort order the burn page resolved to
+print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql order: $order\n";
+
+       # LEFT JOIN + "archive_backup.backup_id IS NULL" selects backups
+       # with no archive record at all
+       my $sql = qq{
+               SELECT 
+                       backups.hostID AS hostID,
+                       hosts.name AS host,
+                       shares.name AS share,
+                       backups.num AS backupnum,
+                       backups.type AS type,
+                       backups.date AS date,
+                       date_part('epoch',now()) - backups.date as age,
+                       backups.size AS size,
+                       backups.id AS id,
+                       backups.inc_size AS inc_size,
+                       backups.parts AS parts
+               FROM backups 
+               INNER JOIN shares       ON backups.shareID=shares.ID
+               INNER JOIN hosts        ON backups.hostID = hosts.ID
+               LEFT OUTER JOIN archive_backup ON archive_backup.backup_id = backups.id 
+               WHERE backups.inc_size > 0 AND backups.size > 0 AND backups.inc_deleted is false AND archive_backup.backup_id IS NULL AND backups.parts > 0
+               GROUP BY
+                       backups.hostID,
+                       hosts.name,
+                       shares.name,
+                       backups.num,
+                       backups.shareid,
+                       backups.id,
+                       backups.type,
+                       backups.date,
+                       backups.size,
+                       backups.inc_size,
+                       backups.parts
+               ORDER BY $order
+       };
+       my $sth = $dbh->prepare( $sql );
+       my @ret;
+       $sth->execute();
+
+       while ( my $row = $sth->fetchrow_hashref() ) {
+               # age comes back in seconds; show it in days
+               $row->{'age'} = sprintf("%0.1f", ( $row->{'age'} / 86400 ) );
+               #$row->{'age'} = sprintf("%0.1f", ( (time() - $row->{'date'}) / 86400 ) );
+
+               # backups larger than one medium get a volume estimate
+               my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
+               if ($row->{size} > $max_archive_size) {
+                       ($row->{volumes}, $row->{inc_size_calc}) = getVolumes($row->{id});
+               }
+
+               $row->{size} = sprintf("%0.2f", $row->{size} / 1024 / 1024);
+
+               # do some cluster calculation (approximate)
+               $row->{inc_size} = int(( ($row->{inc_size} + 1023 ) / 2 )  * 2);
+               $row->{inc_size_calc} ||= $row->{inc_size};
+               push @ret, $row;
+       }
+      
+       return @ret;
+}
+
+# Render the "burn media" page: a form listing every backup returned by
+# getBackupsNotBurned(), with embedded CSS and JavaScript that tracks the
+# selected total size against the configured media capacity and drives
+# the progress bar.  Returns the assembled HTML as a single string.
+sub displayBackupsGrid($) {
+
+       my $param = shift;
+
+       my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
+       # NOTE(review): the key checked is MaxArchiveFileSize but the die
+       # message says MaxFileInSize -- confirm which name is intended
+       my $max_archive_file_size = $Conf{MaxArchiveFileSize}  || die "no MaxFileInSize";
+
+       # .= on a freshly declared scalar acts as plain assignment here
+       my $retHTML .= q{
+               <form id="forma" method="POST" action="}.$MyURL.q{?action=burn">
+       };
+
+       # static stylesheet + start of the script block; single-quoted
+       # heredoc, so nothing below is interpolated
+       $retHTML .= <<'EOF3';
+<style type="text/css">
+<!--
+DIV#fixedBox {
+       position: absolute;
+       top: 50em;
+       left: -24%;
+       padding: 0.5em;
+       width: 20%;
+       background-color: #E0F0E0;
+       border: 1px solid #00C000;
+}
+
+DIV#fixedBox, DIV#fixedBox INPUT, DIV#fixedBox TEXTAREA {
+       font-size: 10pt;
+}
+
+FORM>DIV#fixedBox {
+       position: fixed !important;
+       left: 0.5em !important;
+       top: auto !important;
+       bottom: 1em !important;
+       width: 15% !important;
+}
+
+DIV#fixedBox INPUT[type=text], DIV#fixedBox TEXTAREA {
+       border: 1px solid #00C000;
+}
+
+DIV#fixedBox #note {
+       display: block;
+       width: 100%;
+}
+
+DIV#fixedBox #submitBurner {
+       display: block;
+       width: 100%;
+       margin-top: 0.5em;
+       cursor: pointer;
+}
+
+* HTML {
+       overflow-y: hidden;
+}
+
+* HTML BODY {
+       overflow-y: auto;
+       height: 100%;
+       font-size: 100%;
+}
+
+* HTML DIV#fixedBox {
+       position: absolute;
+}
+
+#mContainer, #gradient, #mask, #progressIndicator {
+       display: block;
+       width: 100%;
+       font-size: 10pt;
+       font-weight: bold;
+       text-align: center;
+       vertical-align: middle;
+       padding: 1px;
+}
+
+#gradient, #mask, #progressIndicator {
+       left: 0;
+       border-width: 1px;
+       border-style: solid;
+       border-color: #000000;
+       color: #404040;
+       margin: 0.4em;
+       position: absolute;
+       margin-left: -1px;
+       margin-top: -1px;
+       margin-bottom: -1px;
+       overflow: hidden;
+}
+
+#mContainer {
+       display: block;
+       position: relative;
+       padding: 0px;
+       margin-top: 0.4em;
+       margin-bottom: 0.5em;
+}
+
+#gradient {
+       z-index: 1;
+       background-color: #FFFF00;
+}
+
+#mask {
+       z-index: 2;
+       background-color: #FFFFFF;
+}
+
+#progressIndicator {
+       z-index: 3;
+       background-color: transparent;
+}
+
+#volumes {
+       padding: 0.4em;
+       display: none;
+       width: 100%;
+       font-size: 80%;
+       color: #ff0000;
+       text-align: center;
+}
+-->
+</style>
+<script type="text/javascript">
+<!--
+
+var debug_div;
+EOF3
+
+       # take maximum archive size from configuration
+       $retHTML .= qq{
+var media_size = $max_archive_size ;
+var max_file_size = $max_archive_file_size;
+
+};
+
+       # the rest of the client-side logic; again uninterpolated
+       $retHTML .= <<'EOF3';
+
+function debug(msg) {
+       return; // Disable debugging
+
+       if (! debug_div) debug_div = document.getElementById('debug');
+
+       // this will create debug div if it doesn't exist.
+       if (! debug_div) {
+               debug_div = document.createElement('div');
+               if (document.body) document.body.appendChild(debug_div);
+               else debug_div = null;
+       }
+       if (debug_div) {
+               debug_div.appendChild(document.createTextNode(msg));
+               debug_div.appendChild(document.createElement("br"));
+       }
+}
+
+
+var element_id_cache = Array();
+
+function element_id(name,element) {
+       if (! element_id_cache[name]) {
+               element_id_cache[name] = self.document.getElementById(name);
+       }
+       return element_id_cache[name];
+}
+
+function checkAll(location) {
+       var f = element_id('forma') || null;
+       if (!f) return false;
+
+       var len = f.elements.length;
+       var check_all = element_id('allFiles');
+       var suma = check_all.checked ? (parseInt(f.elements['totalsize'].value) || 0) : 0;
+
+       for (var i = 0; i < len; i++) {
+               var e = f.elements[i];
+               if (e.name != 'all' && e.name.substr(0, 3) == 'fcb') {
+                       if (check_all.checked) {
+                               if (e.checked) continue;
+                               var el = element_id("fss" + e.name.substr(3));
+                               var size = parseInt(el.value) || 0;
+                               debug('suma: '+suma+' size: '+size);
+                               if ((suma + size) < media_size) {
+                                       suma += size;
+                                       e.checked = true;
+                               } else {
+                                       break;
+                               }
+                       } else {
+                               e.checked = false;
+                       }
+               }
+       }
+       update_sum(suma);
+}
+
+function update_sum(suma, suma_disp) {
+       if (! suma_disp) suma_disp = suma;
+       suma_disp = Math.floor(suma_disp / 1024);
+       element_id('forma').elements['totalsize_kb'].value = suma_disp;
+       element_id('forma').elements['totalsize'].value = suma;
+       pbar_set(suma, media_size);
+       debug('total size: ' + suma);
+}
+
+function update_size(name, checked, suma) {
+       var size = parseInt( element_id("fss" + name).value);
+
+       if (checked) {
+               suma += size;
+       } else {
+               suma -= size;
+       }
+
+       var volumes = parseInt( element_id("prt" + name).value);
+       debug('update_size('+name+','+checked+') suma: '+suma+' volumes: '+volumes);
+       if (volumes > 1) {
+               if (checked) {
+                       element_id("volumes").innerHTML = "This will take "+volumes+" mediums!";
+                       element_id("volumes").style.display = 'block';
+                       suma = size;
+                       update_sum(suma);
+               } else {
+                       suma -= size;
+                       element_id("volumes").style.display = 'none';
+               }
+       }
+
+       return suma;
+}
+
+function sumiraj(e) {
+       var suma = parseInt(element_id('forma').elements['totalsize'].value) || 0;
+       var len = element_id('forma').elements.length;
+       if (e) {
+               suma = update_size(e.name.substr(3), e.checked, suma);
+               if (suma < 0) suma = 0;
+       } else {
+               suma = 0;
+               for (var i = 0; i < len; i++) {
+                       var fel = element_id('forma').elements[i];
+                       if (fel.name != 'all' && fel.checked && fel.name.substr(0,3) == 'fcb') {
+                               suma = update_size(fel.name.substr(3), fel.checked, suma);
+                       } 
+               }
+       }
+       update_sum(suma);
+       return suma;
+}
+
+/* progress bar */
+
+var _pbar_width = null;
+var _pbar_warn = 10;   // change color in last 10%
+
+function pbar_reset() {
+       element_id("mask").style.left = "0px";
+       _pbar_width = element_id("mContainer").offsetWidth - 2;
+       element_id("mask").style.width = _pbar_width + "px";
+       element_id("mask").style.display = "block";
+       element_id("progressIndicator").style.zIndex  = 10;
+       element_id("progressIndicator").innerHTML = "0";
+}
+
+function dec2hex(d) {
+        var hch = '0123456789ABCDEF';
+        var a = d % 16;
+        var q = (d - a) / 16;
+        return hch.charAt(q) + hch.charAt(a);
+}
+
+function pbar_set(amount, max) {
+       debug('pbar_set('+amount+', '+max+')');
+
+       if (_pbar_width == null) {
+               var _mc = element_id("mContainer");
+               if (_pbar_width == null) _pbar_width = parseInt(_mc.offsetWidth ? (_mc.offsetWidth - 2) : 0) || null;
+               if (_pbar_width == null) _pbar_width = parseInt(_mc.clientWidth ? (_mc.clientWidth + 2) : 0) || null;
+               if (_pbar_width == null) _pbar_width = 0;
+       }
+
+       var pcnt = Math.floor(amount * 100 / max);
+       var p90 = 100 - _pbar_warn;
+       var pcol = pcnt - p90;
+       if (Math.round(pcnt) <= 100) {
+               if (pcol < 0) pcol = 0;
+               var e = element_id("submitBurner");
+               debug('enable_button');
+               e.disabled = false;
+               var a = e.getAttributeNode('disabled') || null;
+               if (a) e.removeAttributeNode(a);
+       } else {
+               debug('disable button');
+               pcol = _pbar_warn;
+               var e = element_id("submitBurner");
+               if (!e.disabled) e.disabled = true;
+       }
+       var col_g = Math.floor((_pbar_warn - pcol) * 255 / _pbar_warn);
+       var col = '#FF' + dec2hex(col_g) + '00';
+
+       //debug('pcol: '+pcol+' g:'+col_g+' _pbar_warn:'+ _pbar_warn + ' color: '+col);
+       element_id("gradient").style.backgroundColor = col;
+
+       element_id("progressIndicator").innerHTML = pcnt + '%';
+       //element_id("progressIndicator").innerHTML = amount;
+
+       element_id("mask").style.clip = 'rect(' + Array(
+               '0px',
+               element_id("mask").offsetWidth + 'px',
+               element_id("mask").offsetHeight + 'px',
+               Math.round(_pbar_width * amount / max) + 'px'
+       ).join(' ') + ')';
+}
+
+if (!self.body) self.body = new Object();
+self.onload = self.document.onload = self.body.onload = function() {
+       //pbar_reset();
+       sumiraj();
+};
+
+// -->
+</script>
+<div id="fixedBox">
+
+<input type="hidden" name="totalsize"/>
+Size: <input type="text" name="totalsize_kb" size="7" readonly="readonly" style="text-align:right;" value="0" /> kB
+
+<div id="mContainer">
+       <div id="gradient">&nbsp;</div>
+       <div id="mask">&nbsp;</div>
+       <div id="progressIndicator">0%</div>
+</div>
+<br/>
+
+<div id="volumes">&nbsp;</div>
+
+Note:
+<textarea name="note" cols="10" rows="5" id="note"></textarea>
+
+<input type="submit" id="submitBurner" value="Burn selected" name="submitBurner" />
+
+</div>
+<!--
+<div id="debug" style="float: right; width: 10em; border: 1px #ff0000 solid; background-color: #ffe0e0; -moz-opacity: 0.7;">
+no debug output yet
+</div>
+-->
+EOF3
+       # table header: the select-all checkbox plus sortable column links
+       $retHTML .= q{
+                       <input type="hidden" value="burn" name="action">
+                       <input type="hidden" value="results" name="search_results">
+                       <table style="fview" border="0" cellspacing="0" cellpadding="2">
+                       <tr class="tableheader">
+                       <td class="tableheader">
+                               <input type="checkbox" name="allFiles" id="allFiles" onClick="checkAll('allFiles');">
+                       </td>
+       } .
+               sort_header($param, 'Share', 'share', 'center') .
+               sort_header($param, '#', 'num', 'center') .
+       qq{
+                       <td align="center">Type</td>
+       } .
+               sort_header($param, 'Date', 'date', 'center') .
+               sort_header($param, 'Age/days', 'age', 'center') .
+               sort_header($param, 'Size/Mb', 'size', 'center') .
+               sort_header($param, 'gzip size/Kb', 'incsize', 'center') .
+       qq{
+                       <td align="center">medias</td></tr>
+       };
+
+       # alternate the row background every time the host changes
+       my @color = (' bgcolor="#e0e0e0"', '');
+
+       my $i = 0;
+       my $host = '';
+
+       foreach my $backup ( getBackupsNotBurned($param) ) {
+
+               if ($host ne $backup->{'host'}) {
+                       $i++;
+                       $host = $backup->{'host'};
+               }
+               my $ftype = "";
+
+               # per-row key shared by the checkbox (fcb*), size (fss*)
+               # and parts (prt*) elements read by the JavaScript above
+               my $checkbox_key = $backup->{'hostid'}. '_' .$backup->{'backupnum'} . '_' . $backup->{'id'};
+
+               $retHTML .=
+                       '<tr' . $color[$i %2 ] . '>
+                       <td class="fview">';
+
+               # only selectable when an archive dump actually exists
+               if (($backup->{'inc_size'} || 0) > 0) {
+                       $retHTML .= '
+                       <input type="checkbox" name="fcb' . $checkbox_key . '" value="' . $checkbox_key . '" onClick="sumiraj(this);">';
+               }
+
+               my $img_url = $Conf{CgiImageDirURL};
+
+               # NOTE(review): the two hidden inputs below each emit a
+               # </td> after an already closed cell -- confirm the markup
+               $retHTML .=
+                       '</td>' .
+                       '<td align="right">' . $backup->{'host'} . ':' . $backup->{'share'} . '</td>' .
+                       '<td align="center">' . $backup->{'backupnum'} . '</td>' .
+                       '<td align="center">' . $backup->{'type'} . '</td>' .
+                       '<td align="center">' . epoch_to_iso( $backup->{'date'} ) . '</td>' .
+                       '<td align="center">' . $backup->{'age'} . '</td>' .
+                       '<td align="right">' . $backup->{'size'} . '</td>' .
+                       '<td align="right">' . sprintf("%0.1f", $backup->{'inc_size'} / 1024 ) .
+                       '<input type="hidden" id="fss'.$checkbox_key .'" value="'. $backup->{'inc_size_calc'} .'"></td>' .
+                       '<input type="hidden" id="prt'.$checkbox_key .'" value="'. $backup->{'volumes'} .'"></td>' .
+                       '<td align="left">' . ( qq{<img src="$img_url/icon-cd.gif" alt="media">} x $backup->{volumes} ) . '</td>' .
+
+                       "</tr>\n";
+       }
+
+       $retHTML .= "</table>";
+       $retHTML .= "</form>";
+      
+       return $retHTML;
+}      
+
# Render the search-results page: a table of matching files with sortable
# column headers, per-file archive-media info, hidden form state and a
# pager. Returns an HTML fragment (does not print).
sub displayGrid($) {
        my ($param) = @_;

        # current page number (0-based) and the filename pattern to highlight
        my $offset = $param->{'offset'};
        my $hilite = $param->{'search_filename'};

        my $retHTML = "";
        my $start_t = time();

        # Run the search through the Hyper Estraier full-text index when it
        # is enabled and a filename pattern was given, otherwise query the
        # SQL database directly.
        my ($results, $files);
        if ($param->{'use_hest'} && length($hilite) > 0) {
                ($results, $files) = getFilesHyperEstraier($param);
        } else {
                ($results, $files) = getFiles($param);
        }

        # elapsed search time, reported to the user below
        my $dur_t = time() - $start_t;
        my $dur = sprintf("%0.4fs", $dur_t);

        # 1-based row range shown on this page ($on_page rows per page)
        my ($from, $to) = (($offset * $on_page) + 1, ($offset * $on_page) + $on_page);

        if ($results <= 0) {
                $retHTML .= qq{
                        <p style="color: red;">No results found...</p>
                };
                return $retHTML;
        } else {
                # DEBUG
                #use Data::Dumper;
                #$retHTML .= '<pre>' . Dumper($files) . '</pre>';
        }


        # summary line and the opening of the result table; the sortable
        # column headers are appended right after this
        $retHTML .= qq{
        <div>
        Found <b>$results files</b> showing <b>$from - $to</b> (took $dur)
        </div>
        <table style="fview" width="100%" border="0" cellpadding="2" cellspacing="0">
                <tr class="fviewheader"> 
                <td></td>
        };

+       sub sort_header($$$$) {
+               my ($param, $display, $name, $align) = @_;
+
+               my ($sort_what, $sort_direction) = split(/_/,$param->{'sort'},2);
+
+               my $old_sort = $param->{'sort'};
+
+               my $html = qq{<td align="$align"};
+               my $arrow = '';
+
+               if (lc($sort_what) eq lc($name)) {
+                       my $direction = lc($sort_direction);
+
+                       # swap direction or fallback to default
+                       $direction =~ tr/ad/da/;
+                       $direction = 'a' unless ($direction =~ /[ad]/);
+
+                       $param->{'sort'} = $name . '_' . $direction;
+                       $html .= ' style="border: 1px solid #808080;"';
+               
+                       # add unicode arrow for direction
+                       $arrow .= '&nbsp;';
+                       $arrow .= $direction eq 'a'  ?  '&#9650;'
+                               : $direction eq 'd'  ?  '&#9660;'
+                               :                       ''
+                               ;
+
+               } else {
+                       $param->{'sort'} = $name . '_a';
+               }
+
+               $html .= '><a href="' . page_uri($param) . '">' . $display . '</a>' . $arrow . '</td>';
+               $param->{'sort'} = $old_sort;
+
+               return $html;
+       }
+
        # sortable column headers: share, path, backup number, size, date
        $retHTML .=
                sort_header($param, 'Share', 'share', 'center') .
                sort_header($param, 'Type and Name', 'path', 'center') .
                sort_header($param, '#', 'num', 'center') .
                sort_header($param, 'Size', 'size', 'center') .
                sort_header($param, 'Date', 'date', 'center');

        # the Media column is not sortable
        $retHTML .= qq{
                <td align="center">Media</td>
                </tr>
        };

        my $file;
+
+       sub hilite_html($$) {
+               my ($html, $search) = @_;
+               $html =~ s#($search)#<b>$1</b>#gis;
+               return $html;
+       }
+
+       sub restore_link($$$$$$) {
+               my $type = shift;
+               my $action = 'RestoreFile';
+               $action = 'browse' if (lc($type) eq 'dir');
+               return sprintf(qq{<a href="?action=%s&host=%s&num=%d&share=%s&dir=%s">%s</a>}, $action, @_);
+       }
+
+       my $sth_archived;
+       my %archived_cache;
+
+       sub check_archived($$$) {
+               my ($host, $share, $num) = @_;
+
+               if (my $html = $archived_cache{"$host $share $num"}) {
+                       return $html;
+               }
+
+               $sth_archived ||= $dbh->prepare(qq{
+                       select
+                               dvd_nr, note,
+                               count(archive_burned.copy) as copies
+                       from archive
+                       inner join archive_burned on archive_burned.archive_id = archive.id
+                       inner join archive_backup on archive.id = archive_backup.archive_id
+                       inner join backups on backups.id = archive_backup.backup_id
+                       inner join hosts on hosts.id = backups.hostid
+                       inner join shares on shares.id = backups.shareid
+                       where hosts.name = ? and shares.name = ? and backups.num = ?
+                       group by dvd_nr, note
+               });
+
+               my @mediums;
+
+               $sth_archived->execute($host, $share, $num);
+               while (my $row = $sth_archived->fetchrow_hashref()) {
+                       push @mediums, '<abbr title="' .
+                               $row->{'note'} .
+                               ' [' . $row->{'copies'} . ']' .
+                               '">' .$row->{'dvd_nr'} .
+                               '</abbr>';
+               }
+
+               my $html = join(", ",@mediums);
+               $archived_cache{"$host $share $num"} = $html;
+               return $html;
+       }
+
        # running row number, continuing across pages
        my $i = $offset * $on_page;

        foreach $file (@{ $files }) {
                $i++;

                my $typeStr  = BackupPC::Attrib::fileType2Text(undef, $file->{'type'});
                $retHTML .= qq{<tr class="fviewborder">};

                $retHTML .= qq{<td class="fviewborder">$i</td>};

                # one row: host:share, type icon + highlighted path,
                # restore/browse link, size, date and the archive media
                # this backup is stored on
                $retHTML .=
                        qq{<td class="fviewborder" align="right">} . $file->{'hname'} . ':' . $file->{'sname'} . qq{</td>} .
                        qq{<td class="fviewborder"><img src="$Conf{CgiImageDirURL}/icon-$typeStr.gif" alt="$typeStr" align="middle">&nbsp;} . hilite_html( $file->{'filepath'}, $hilite ) . qq{</td>} .
                        qq{<td class="fviewborder" align="center">} . restore_link( $typeStr, ${EscURI( $file->{'hname'} )}, $file->{'backupnum'}, ${EscURI( $file->{'sname'})}, ${EscURI( $file->{'filepath'} )}, $file->{'backupnum'} ) . qq{</td>} .
                        qq{<td class="fviewborder" align="right">} . $file->{'size'} . qq{</td>} .
                        qq{<td class="fviewborder">} . epoch_to_iso( $file->{'date'} ) . qq{</td>} .
                        qq{<td class="fviewborder">} . check_archived( $file->{'hname'}, $file->{'sname'}, $file->{'backupnum'} ) . qq{</td>};

                $retHTML .= "</tr>";
        }
        $retHTML .= "</table>";

        # re-emit all search-form variables as hidden inputs so the current
        # query survives a form re-submit
        foreach my $n (qw/search_day_from search_month_from search_year_from search_day_to search_month_to search_year_to search_backup_day_from search_backup_month_from search_backup_year_from search_backup_day_to search_backup_month_to search_backup_year_to search_filename offset/) {
                $retHTML .= qq{<INPUT TYPE="hidden" NAME="$n" VALUE="$In{$n}">\n};
        }

        # pager state: $del separates the page links, pages are 0-based
        my $del = '';
        my $max_page = int( $results / $on_page );
        my $page = 0;
+
+       sub page_uri($) {
+               my $param = shift || die "no param?";
+
+               my $uri = $MyURL;
+               my $del = '?';
+               foreach my $k (keys %{ $param }) {
+                       if ($param->{$k}) {
+                               $uri .= $del . $k . '=' . ${EscURI( $param->{$k} )};
+                               $del = '&';
+                       }
+               }
+               return $uri;
+       }
+
+       sub page_link($$$) {
+               my ($param,$page,$display) = @_;
+
+               $param->{'offset'} = $page if (defined($page));
+
+               my $html = '<a href = "' . page_uri($param) . '">' . $display . '</a>';
+       }
+
        $retHTML .= '<div style="text-align: center;">';

        # "previous page" link
        if ($offset > 0) {
                $retHTML .= page_link($param, $offset - 1, '&lt;&lt;') . ' ';
        }

        # numbered page links; pages further than $pager_pages from the
        # current one are collapsed into " ... " (the first and the last
        # page are always shown)
        while ($page <= $max_page) {
                if ($page == $offset) {
                        # current page: bold number, no link
                        $retHTML .= $del . '<b>' . ($page + 1) . '</b>';
                } else {
                        $retHTML .= $del . page_link($param, $page, $page + 1);
                }

                if ($page < $offset - $pager_pages && $page != 0) {
                        # jump from the first page to the left edge of the window
                        $retHTML .= " ... ";
                        $page = $offset - $pager_pages;
                        $del = '';
                } elsif ($page > $offset + $pager_pages && $page != $max_page) {
                        # jump from the right edge of the window to the last page
                        $retHTML .= " ... ";
                        $page = $max_page;
                        $del = '';
                } else {
                        $del = ' | ';
                        $page++;
                }
        }

        # "next page" link
        if ($offset < $max_page) {
                $retHTML .= ' ' . page_link($param, $offset + 1, '&gt;&gt;');
        }

        $retHTML .= "</div>";

        return $retHTML;
}
+
+1;