X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=lib%2FBackupPC%2FSearchLib.pm;h=792d5b9a6f62c63003232f7da318bf9f3829abf6;hb=bc4ccaeb2aa4b32cc135e9cf69f75f502ad50a06;hp=4965a86b490809a8a660e8fa7f480fbea7c802f4;hpb=f4e195d8ca8a37e34c4b1f1422673fe523838d4f;p=BackupPC.git diff --git a/lib/BackupPC/SearchLib.pm b/lib/BackupPC/SearchLib.pm index 4965a86..792d5b9 100644 --- a/lib/BackupPC/SearchLib.pm +++ b/lib/BackupPC/SearchLib.pm @@ -8,6 +8,8 @@ use DBI; use DateTime; use vars qw(%In $MyURL); use Time::HiRes qw/time/; +use XML::Writer; +use IO::File; my $on_page = 100; my $pager_pages = 10; @@ -15,295 +17,1023 @@ my $pager_pages = 10; my $dsn = $Conf{SearchDSN}; my $db_user = $Conf{SearchUser} || ''; +my $hest_node_url = $Conf{HyperEstraierIndex}; + +my $dbh; +my $Dir = $Conf{InstallDir}."/data/log"; +open(LOG, ">>", "$Dir/LOG") +select(LOG); +$|=1; + + +sub get_dbh { + $dbh ||= DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } ); + return $dbh; +} + sub getUnits() { - my @ret = (); - my $tmp; - my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } ); - my $st = - $dbh->prepare( - " SELECT shares.ID AS ID, shares.share AS name FROM shares;"); - $st->execute(); - push (@ret, { 'ID' => '', 'name' => '-'}); - while ( $tmp = $st->fetchrow_hashref() ) { - push( @ret, { 'ID' => $tmp->{'ID'}, 'name' => $tmp->{'name'} } ); - } - $dbh->disconnect(); - return @ret; + my @ret; + + my $dbh = get_dbh(); + my $sth = $dbh->prepare(qq{ + SELECT + shares.id as id, + hosts.name || ':' || shares.name as share + FROM shares + JOIN hosts on hostid = hosts.id + ORDER BY share + } ); + $sth->execute(); + push @ret, { 'id' => '', 'share' => '-'}; # dummy any + + while ( my $row = $sth->fetchrow_hashref() ) { + push @ret, $row; + } + return @ret; } sub epoch_to_iso { my $t = shift || return; - my $dt = DateTime->from_epoch( epoch => $t ) || return; -print STDERR "$t == ",$dt->epoch,"\n"; - return $dt->ymd . ' ' . $dt->hms; + my $iso = BackupPC::Lib::timeStamp(undef, $t); + $iso =~ s/\s/ /g; + return $iso; } -sub getWhere($) { - my ($param) = @_; - my @conditions; +sub dates_from_form($) { + my $param = shift || return; sub mk_epoch_date($$) { my ($name,$suffix) = @_; - my $yyyy = $param->{ $name . '_year_' . $suffix} || return; + my $yyyy = $param->{ $name . '_year_' . $suffix} || return undef; my $mm .= $param->{ $name . '_month_' . $suffix} || ( $suffix eq 'from' ? 1 : 12); my $dd .= $param->{ $name . '_day_' . $suffix} || ( $suffix eq 'from' ? 1 : 31); + + $yyyy =~ s/\D//g; + $mm =~ s/\D//g; + $dd =~ s/\D//g; + + my $h = my $m = my $s = 0; + if ($suffix eq 'to') { + $h = 23; + $m = 59; + $s = 59; + } + my $dt = new DateTime( year => $yyyy, month => $mm, - day => $dd + day => $dd, + hour => $h, + minute => $m, + second => $s, ); + print STDERR "mk_epoch_date($name,$suffix) [$yyyy-$mm-$dd] = " . $dt->ymd . " " . $dt->hms . 
"\n"; return $dt->epoch || 'NULL'; } - my $backup_from = mk_epoch_date('search_backup', 'from'); + my @ret = ( + mk_epoch_date('search_backup', 'from'), + mk_epoch_date('search_backup', 'to'), + mk_epoch_date('search', 'from'), + mk_epoch_date('search', 'to'), + ); + + return @ret; + +} + + +sub getWhere($) { + my $param = shift || return; + + my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param); + + my @conditions; push @conditions, qq{ backups.date >= $backup_from } if ($backup_from); - my $backup_to = mk_epoch_date('search_backup', 'to'); push @conditions, qq{ backups.date <= $backup_to } if ($backup_to); - - my $files_from = mk_epoch_date('search', 'from'); push @conditions, qq{ files.date >= $files_from } if ($files_from); - my $files_to = mk_epoch_date('search', 'to'); push @conditions, qq{ files.date <= $files_to } if ($files_to); - print STDERR "backup: $backup_from - $backup_to files: $files_from - $files_to cond:",join(" | ",@conditions); - - push( @conditions, ' backups.hostID = ' . $param->{'search_host'} ) if ($param->{'search_host'}); + print STDERR "backup: $backup_from - $backup_to files: $files_from - $files_to cond:" . join(" and ",@conditions); - push (@conditions, " upper(files.name) LIKE upper('%".$param->{'search_filename'}."%')") if ($param->{'search_filename'}); + push( @conditions, ' files.shareid = ' . $param->{'search_share'} ) if ($param->{'search_share'}); + push (@conditions, " upper(files.path) LIKE upper('%".$param->{'search_filename'}."%')") if ($param->{'search_filename'}); - return ( - join(" and ", @conditions), - $files_from, $files_to, - $backup_from, $backup_to - ); + if ( $param->{burned} ) { + my $is_what = 'is null'; + $is_what = '= 1' if ($param->{burned} eq 'burned'); + push @conditions, "archive_burned.part $is_what"; + push @conditions, "archive_burned.copy $is_what"; + } + + return join(" and ", @conditions); } +my $sort_def = { + search => { + default => 'date_a', + sql => { + share_d => 'shares.name DESC', + share_a => 'shares.name ASC', + path_d => 'files.path DESC', + path_a => 'files.path ASC', + num_d => 'files.backupnum DESC', + num_a => 'files.backupnum ASC', + size_d => 'files.size DESC', + size_a => 'files.size ASC', + date_d => 'files.date DESC', + date_a => 'files.date ASC', + }, + est => { + share_d => 'sname STRD', + share_a => 'sname STRA', + path_d => 'filepath STRD', + path_a => 'filepath STRA', + num_d => 'backupnum NUMD', + num_a => 'backupnum NUMA', + size_d => 'size NUMD', + size_a => 'size NUMA', + date_d => 'date NUMD', + date_a => 'date NUMA', + } + }, burn => { + default => 'date_a', + sql => { + share_d => 'host DESC, share DESC', + share_a => 'host ASC, share ASC', + num_d => 'backupnum DESC', + num_a => 'backupnum ASC', + date_d => 'date DESC', + date_a => 'date ASC', + age_d => 'age DESC', + age_a => 'age ASC', + size_d => 'size DESC', + size_a => 'size ASC', + incsize_d => 'inc_size DESC', + incsize_a => 'inc_size ASC', + } + } +}; + +sub getSort($$$) { + my ($part,$type, $sort_order) = @_; -sub getFiles($$) { - my ($where, $offset) = @_; + die "unknown part: $part" unless ($sort_def->{$part}); + die "unknown type: $type" unless ($sort_def->{$part}->{$type}); - my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } ); + $sort_order ||= $sort_def->{$part}->{'default'}; + + if (my $ret = $sort_def->{$part}->{$type}->{$sort_order}) { + return $ret; + } else { + # fallback to default sort order + return $sort_def->{$part}->{$type}->{ $sort_def->{$part}->{'default'} 
}; + } +} + +sub getFiles($) { + my ($param) = @_; + + my $offset = $param->{'offset'} || 0; + $offset *= $on_page; + + my $dbh = get_dbh(); my $sql_cols = qq{ files.id AS fid, hosts.name AS hname, shares.name AS sname, - shares.share AS sharename, - files.backupNum AS backupNum, - files.name AS filename, + files.backupnum AS backupnum, files.path AS filepath, - shares.share||files.fullpath AS networkPath, files.date AS date, - files.type AS filetype, - files.size AS size, - }; - - my $sql_dvd_cols = qq{ - dvds.name AS dvd + files.type AS type, + files.size AS size }; my $sql_from = qq{ FROM files INNER JOIN shares ON files.shareID=shares.ID INNER JOIN hosts ON hosts.ID = shares.hostID - INNER JOIN backups ON backups.num = files.backupNum and backups.hostID = hosts.ID - }; - - my $sql_dvd_from = qq{ - LEFT JOIN dvds ON dvds.ID = files.dvdid + INNER JOIN backups ON backups.num = files.backupnum and backups.hostID = hosts.ID AND backups.shareID = files.shareID }; my $sql_where; + my $where = getWhere($param); $sql_where = " WHERE ". $where if ($where); + # do we have to add tables for burned media? + if ( $param->{burned} ) { + $sql_from .= qq{ + LEFT OUTER JOIN archive_backup on archive_backup.backup_id = backups.id + LEFT OUTER JOIN archive_burned on archive_burned.archive_id = archive_backup.archive_id + }; + } + + my $order = getSort('search', 'sql', $param->{'sort'}); + my $sql_order = qq{ - ORDER BY files.id - LIMIT $on_page - OFFSET ? + ORDER BY $order + LIMIT $on_page + OFFSET ? }; - $offset ||= 0; - $offset = ($offset * $on_page) + 1; + my $sql_count = qq{ select count(files.id) $sql_from $sql_where }; + my $sql_results = qq{ select $sql_cols $sql_from $sql_where $sql_order }; - my $sth = $dbh->prepare(qq{ select count(files.id) $sql_from $sql_where }); + my $sth = $dbh->prepare($sql_count); $sth->execute(); - my ($results) = $sth->fetchrow_array(); - $sth = $dbh->prepare(qq{ select $sql_cols $sql_dvd_cols $sql_from $sql_dvd_from $sql_where $sql_order }); + $sth = $dbh->prepare($sql_results); $sth->execute( $offset ); + if ($sth->rows != $results) { + my $bug = "$0 BUG: [[ $sql_count ]] = $results while [[ $sql_results ]] = " . $sth->rows; + $bug =~ s/\s+/ /gs; + print STDERR "$bug\n"; + } + my @ret; while (my $row = $sth->fetchrow_hashref()) { - push(@ret, { - 'hname' => $row->{'hname'}, - 'sname' => $row->{'sname'}, - 'sharename' => $row->{'sharename'}, - 'backupno' => $row->{'backupnum'}, - 'fname' => $row->{'filename'}, - 'fpath' => $row->{'filepath'}, - 'networkpath' => $row->{'networkpath'}, - 'date' => $row->{'date'}, - 'type' => $row->{'filetype'}, - 'size' => $row->{'size'}, - 'id' => $row->{'fid'}, - 'dvd' => $row->{'dvd'} - }); + push @ret, $row; } - + $sth->finish(); - $dbh->disconnect(); return ($results, \@ret); } -sub getBackupsNotBurned() { - - my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } ); - my $sql = q{ - SELECT - hosts.ID AS hostid, - min(hosts.name) AS host, - backups.num AS backupno, - min(backups.type) AS type, - min(backups.date) AS date - FROM backups, shares, files, hosts - WHERE - backups.num = files.backupNum AND - shares.ID = files.shareID AND - backups.hostID = shares.hostID AND - hosts.ID = backups.hostID AND - files.dvdid IS NULL - GROUP BY - backups.hostID, backups.num, hosts.id +sub getHyperEstraier_url($) { + my ($use_hest) = @_; + + return unless $use_hest; + + use Search::Estraier 0.04; + die "direct access to Hyper Estraier datatase is no longer supported. 
Please use estmaster\n" + unless ($use_hest =~ m#^http://#); + + return $use_hest; +} + +sub getFilesHyperEstraier($) { + my ($param) = @_; + + my $offset = $param->{'offset'} || 0; + $offset *= $on_page; + + die "no Hyper Estraier node URL?" unless ($hest_node_url); + + # open the database + my $db; + if ($hest_node_url) { + $db ||= Search::Estraier::Node->new($hest_node_url); + $db->set_auth('admin', 'admin'); + } else { + die "BUG: unimplemented"; + } + + # create a search condition object + my $cond = Search::Estraier::Condition->new(); + + my $q = $param->{'search_filename'}; + my $shareid = $param->{'search_share'}; + + if (length($q) > 0) { + # exact match + $cond->add_attr("filepath ISTRINC $q"); + + $q =~ s/(.)/$1 /g; + # set the search phrase to the search condition object + $cond->set_phrase($q); + } + + my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param); + + $cond->add_attr("backup_date NUMGE $backup_from") if ($backup_from); + $cond->add_attr("backup_date NUMLE $backup_to") if ($backup_to); + + $cond->add_attr("date NUMGE $files_from") if ($files_from); + $cond->add_attr("date NUMLE $files_to") if ($files_to); + + $cond->add_attr("shareid NUMEQ $shareid") if ($shareid); + + $cond->set_max( $offset + $on_page ); + $cond->set_options( 'SURE' ); + $cond->set_order( getSort('search', 'est', $param->{'sort'} ) ); + + # get the result of search + my @res; + my ($result, $hits); + + if ($hest_node_url) { + $result = $db->search($cond, 0); + if ($result) { + $hits = $result->hits; + } else { + $hits = 0; + return ($hits,[]); + } + } else { + die "BUG: unimplemented"; + } + + # for each document in result + for my $i ($offset .. ($offset + $on_page - 1)) { + last if ($i >= $result->doc_num); + + my $doc; + if ($hest_node_url) { + $doc = $result->get_doc($i); + } else { + die "BUG: unimplemented"; + } + + my $row; + foreach my $c (qw/fid hname sname backupnum filepath date type size/) { + $row->{$c} = $doc->attr($c); + } + push @res, $row; + } + + return ($hits, \@res); +} + +sub getGzipName($$$) +{ + my ($host, $share, $backupnum) = @_; + my $ret = $Conf{GzipSchema}; + + $share =~ s/\//_/g; + $ret =~ s/\\h/$host/ge; + $ret =~ s/\\s/$share/ge; + $ret =~ s/\\n/$backupnum/ge; + + $ret =~ s/__+/_/g; + + return $ret; + +} + +sub get_tgz_size_by_name($) { + my $name = shift; + + my $tgz = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}.'/'.$name; + my $size = -1; + + if (-f "${tgz}.tar.gz") { + print "stating ${tgz}.tar.gz..."; + $size = (stat("${tgz}.tar.gz"))[7]; + print "size: $size\n"; + } elsif (-d $tgz) { + print "$tgz is dir, stating files in it...\n"; + opendir(my $dir, $tgz) || die "can't opendir $tgz: $!"; + my @parts = grep { !/^\./ && !/md5/ && -f "$tgz/$_" } readdir($dir); + $size = 0; + foreach my $part (@parts) { + my $currSize = (stat("$tgz/$part"))[7]; + $size += (stat("$tgz/$part"))[7] || die "can't stat $tgz/$part: $!"; + print "\t$tgz/$part: $currSize\n"; + } + print "\ttotal $size\n"; + + closedir $dir; + } else { + return -1; + } + + return $size; +} + +sub getGzipSize($$) +{ + my ($hostID, $backupNum) = @_; + my $sql; + my $dbh = get_dbh(); + + $sql = q{ + SELECT hosts.name as host, + shares.name as share, + backups.num as backupnum + FROM hosts, backups, shares + WHERE shares.id=backups.shareid AND + hosts.id =backups.hostid AND + hosts.id=? AND + backups.num=? 
+ }; + my $sth = $dbh->prepare($sql); + $sth->execute($hostID, $backupNum); + + my $row = $sth->fetchrow_hashref(); + + return get_tgz_size_by_name( + getGzipName($row->{'host'}, $row->{share}, $row->{'backupnum'}) + ); +} + +sub getVolumes($) { + my $id = shift; + + my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize"; + + my $sth = $dbh->prepare(qq{ + select + size + from backup_parts + where backup_id = ? + order by part_nr asc + }); + + $sth->execute($id); + + my $cumulative_size = 0; + my $volumes = 1; + + while(my ($size) = $sth->fetchrow_array) { + if ($cumulative_size + $size > $max_archive_size) { + $volumes++; + $cumulative_size = $size; + } else { + $cumulative_size += $size; + } + } + + return ($volumes,$cumulative_size); +} + +sub getBackupsNotBurned($) { + + my $param = shift; + my $dbh = get_dbh(); + + my $order = getSort('burn', 'sql', $param->{'sort'}); + +print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql order: $order\n"; + + my $sql = qq{ + SELECT + backups.hostID AS hostID, + hosts.name AS host, + shares.name AS share, + backups.num AS backupnum, + backups.type AS type, + backups.date AS date, + date_part('epoch',now()) - backups.date as age, + backups.size AS size, + backups.id AS id, + backups.inc_size AS inc_size, + backups.parts AS parts + FROM backups + INNER JOIN shares ON backups.shareID=shares.ID + INNER JOIN hosts ON backups.hostID = hosts.ID + LEFT OUTER JOIN archive_backup ON archive_backup.backup_id = backups.id + WHERE backups.inc_size > 0 AND backups.size > 0 AND backups.inc_deleted is false AND archive_backup.backup_id IS NULL AND backups.parts > 0 + GROUP BY + backups.hostID, + hosts.name, + shares.name, + backups.num, + backups.shareid, + backups.id, + backups.type, + backups.date, + backups.size, + backups.inc_size, + backups.parts + ORDER BY $order }; my $sth = $dbh->prepare( $sql ); my @ret; $sth->execute(); - while ( my $row = $sth->fetchrow_hashref() ) { - push(@ret, { - 'host' => $row->{'host'}, - 'hostid' => $row->{'hostid'}, - 'backupno' => $row->{'backupno'}, - 'type' => $row->{'type'}, - 'date' => $row->{'date'} - } - ); + while ( my $row = $sth->fetchrow_hashref() ) { + $row->{'age'} = sprintf("%0.1f", ( $row->{'age'} / 86400 ) ); + #$row->{'age'} = sprintf("%0.1f", ( (time() - $row->{'date'}) / 86400 ) ); + + my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize"; + if ($row->{size} > $max_archive_size) { + ($row->{volumes}, $row->{inc_size_calc}) = getVolumes($row->{id}); + } + + $row->{size} = sprintf("%0.2f", $row->{size} / 1024 / 1024); + + # do some cluster calculation (approximate) + $row->{inc_size} = int(( ($row->{inc_size} + 1023 ) / 2 ) * 2); + $row->{inc_size_calc} ||= $row->{inc_size}; + push @ret, $row; } - return @ret; + return @ret; } -sub displayBackupsGrid() - { - my $retHTML = ""; - my $addForm = 1; - - if ($addForm) { +sub displayBackupsGrid($) { + + my $param = shift; + + my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize"; + my $max_archive_file_size = $Conf{MaxArchiveFileSize} || die "no MaxFileInSize"; - $retHTML .= < + my $retHTML .= q{ +
+ }; + + $retHTML .= <<'EOF3'; + + +
+	[ EOF3 heredoc body: the HTML/JavaScript markup was stripped by the diff
+	  viewer; only the text fragments "Size: kB", "0%" and "Note:" survive. ]
+ +EOF3 + $retHTML .= q{ + + + + + + } . + sort_header($param, 'Share', 'share', 'center') . + sort_header($param, '#', 'num', 'center') . + qq{ + + } . + sort_header($param, 'Date', 'date', 'center') . + sort_header($param, 'Age/days', 'age', 'center') . + sort_header($param, 'Size/Mb', 'size', 'center') . + sort_header($param, 'gzip size/Kb', 'incsize', 'center') . + qq{ + + }; + + my @color = (' bgcolor="#e0e0e0"', ''); + + my $i = 0; + my $host = ''; + + foreach my $backup ( getBackupsNotBurned($param) ) { + + if ($host ne $backup->{'host'}) { + $i++; + $host = $backup->{'host'}; + } + my $ftype = ""; + + my $checkbox_key = $backup->{'hostid'}. '_' .$backup->{'backupnum'} . '_' . $backup->{'id'}; + + $retHTML .= + ' + ' . + '' . + '' . + '' . + '' . + '' . + '' . + '' . + '' . + '' . + + "\n"; + } + + $retHTML .= "
+ + Typemedias
';
+
+	if (($backup->{'inc_size'} || 0) > 0) {
+		$retHTML .= '
+		';
+	}
+
+	my $img_url = $Conf{CgiImageDirURL};
+
+	$retHTML .=
+		'' . $backup->{'host'} . ':' . $backup->{'share'} .
+		'' . $backup->{'backupnum'} .
+		'' . $backup->{'type'} .
+		'' . epoch_to_iso( $backup->{'date'} ) .
+		'' . $backup->{'age'} .
+		'' . $backup->{'size'} .
+		'' . sprintf("%0.1f", $backup->{'inc_size'} / 1024 ) .
+		'' . ( qq{media} x $backup->{volumes} ) . '
"; + $retHTML .= "
"; return $retHTML; } -sub displayGrid($$$$) { - my ($where, $addForm, $offset, $hilite) = @_; +sub displayGrid($) { + my ($param) = @_; + + my $offset = $param->{'offset'}; + my $hilite = $param->{'search_filename'}; + my $retHTML = ""; - if ($addForm) { - $retHTML .= qq{
}; - $retHTML.= qq{}; - $retHTML .= qq{}; - } - my $start_t = time(); - my ($results, $files) = getFiles($where, $offset); + my ($results, $files); + if ($param->{'use_hest'} && length($hilite) > 0) { + ($results, $files) = getFilesHyperEstraier($param); + } else { + ($results, $files) = getFiles($param); + } my $dur_t = time() - $start_t; my $dur = sprintf("%0.4fs", $dur_t); my ($from, $to) = (($offset * $on_page) + 1, ($offset * $on_page) + $on_page); + if ($results <= 0) { + $retHTML .= qq{ +

No results found...

+ }; + return $retHTML; + } else { + # DEBUG + #use Data::Dumper; + #$retHTML .= '
' . Dumper($files) . '
'; + } + + + $retHTML .= qq{ +
+ Found $results files showing $from - $to (took $dur) +
+ + + + }; + + sub sort_header($$$$) { + my ($param, $display, $name, $align) = @_; + + my ($sort_what, $sort_direction) = split(/_/,$param->{'sort'},2); + + my $old_sort = $param->{'sort'}; + + my $html = qq{'; + $param->{'sort'} = $old_sort; + + return $html; + } + + $retHTML .= + sort_header($param, 'Share', 'share', 'center') . + sort_header($param, 'Type and Name', 'path', 'center') . + sort_header($param, '#', 'num', 'center') . + sort_header($param, 'Size', 'size', 'center') . + sort_header($param, 'Date', 'date', 'center'); + $retHTML .= qq{ -
Found $results files showing $from - $to (took $dur) -
{'sort'} = $name . '_' . $direction; + $html .= ' style="border: 1px solid #808080;"'; + + # add unicode arrow for direction + $arrow .= ' '; + $arrow .= $direction eq 'a' ? '▲' + : $direction eq 'd' ? '▼' + : '' + ; + + } else { + $param->{'sort'} = $name . '_a'; + } + + $html .= '>' . $display . '' . $arrow . '
- - - - - - - - + }; @@ -322,21 +1052,63 @@ sub displayGrid($$$$) { return sprintf(qq{%s}, $action, @_); } + my $sth_archived; + my %archived_cache; + + sub check_archived($$$) { + my ($host, $share, $num) = @_; + + if (my $html = $archived_cache{"$host $share $num"}) { + return $html; + } + + $sth_archived ||= $dbh->prepare(qq{ + select + dvd_nr, note, + count(archive_burned.copy) as copies + from archive + inner join archive_burned on archive_burned.archive_id = archive.id + inner join archive_backup on archive.id = archive_backup.archive_id + inner join backups on backups.id = archive_backup.backup_id + inner join hosts on hosts.id = backups.hostid + inner join shares on shares.id = backups.shareid + where hosts.name = ? and shares.name = ? and backups.num = ? + group by dvd_nr, note + }); + + my @mediums; + + $sth_archived->execute($host, $share, $num); + while (my $row = $sth_archived->fetchrow_hashref()) { + push @mediums, '' .$row->{'dvd_nr'} . + ''; + } + + my $html = join(", ",@mediums); + $archived_cache{"$host $share $num"} = $html; + return $html; + } + + my $i = $offset * $on_page; + foreach $file (@{ $files }) { + $i++; + my $typeStr = BackupPC::Attrib::fileType2Text(undef, $file->{'type'}); - $retHTML .= ""; - - foreach my $v (( - $file->{'sharename'}, - qq{ } . hilite_html( $file->{'fpath'}, $hilite ), - $typeStr, - restore_link( $typeStr, $file->{'hname'}, $file->{'backupno'}, $file->{'sname'}, $file->{'fpath'}, $file->{'backupno'} ), - $file->{'size'}, - epoch_to_iso( $file->{'date'} ), - $file->{'dvd'} - )) { - $retHTML .= qq{}; - } + $retHTML .= qq{}; + + $retHTML .= qq{}; + + $retHTML .= + qq{} . + qq{} . + qq{} . + qq{} . + qq{} . + qq{}; $retHTML .= ""; } @@ -351,19 +1123,39 @@ sub displayGrid($$$$) { my $max_page = int( $results / $on_page ); my $page = 0; - my $link_fmt = '%s'; + sub page_uri($) { + my $param = shift || die "no param?"; + + my $uri = $MyURL; + my $del = '?'; + foreach my $k (keys %{ $param }) { + if ($param->{$k}) { + $uri .= $del . $k . '=' . ${EscURI( $param->{$k} )}; + $del = '&'; + } + } + return $uri; + } + + sub page_link($$$) { + my ($param,$page,$display) = @_; + + $param->{'offset'} = $page if (defined($page)); + + my $html = '' . $display . ''; + } $retHTML .= '
'; if ($offset > 0) { - $retHTML .= sprintf($link_fmt, $offset - 1, '<<') . ' '; + $retHTML .= page_link($param, $offset - 1, '<<') . ' '; } while ($page <= $max_page) { if ($page == $offset) { $retHTML .= $del . '' . ($page + 1) . ''; } else { - $retHTML .= $del . sprintf($link_fmt, $page, $page + 1); + $retHTML .= $del . page_link($param, $page, $page + 1); } if ($page < $offset - $pager_pages && $page != 0) { @@ -381,13 +1173,11 @@ sub displayGrid($$$$) { } if ($offset < $max_page) { - $retHTML .= ' ' . sprintf($link_fmt, $offset + 1, '>>'); + $retHTML .= ' ' . page_link($param, $offset + 1, '>>'); } $retHTML .= "
"; - $retHTML .= "" if ($addForm); - return $retHTML; }
ShareNameType#SizeDateMediaMedia
$v
$i} . $file->{'hname'} . ':' . $file->{'sname'} .
	qq{$typeStr } . hilite_html( $file->{'filepath'}, $hilite ) .
	qq{} . restore_link( $typeStr, ${EscURI( $file->{'hname'} )}, $file->{'backupnum'}, ${EscURI( $file->{'sname'})}, ${EscURI( $file->{'filepath'} )}, $file->{'backupnum'} ) .
	qq{} . $file->{'size'} .
	qq{} . epoch_to_iso( $file->{'date'} ) .
	qq{} . check_archived( $file->{'hname'}, $file->{'sname'}, $file->{'backupnum'} ) . qq{
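
As a closing illustration, the sort keys used by sort_header() resolve through the $sort_def table and getSort() defined earlier in this diff: a UI key such as 'date_d' maps to either an SQL ORDER BY fragment or a Hyper Estraier order expression, falling back to the part's default for unknown keys. A trimmed, runnable sketch, reproducing only a few keys rather than the full table:

    #!/usr/bin/perl
    # Sketch of the getSort()/$sort_def lookup; trimmed to a few keys for illustration.
    use strict;
    use warnings;

    my $sort_def = {
        search => {
            default => 'date_a',
            sql => { date_a => 'files.date ASC', date_d => 'files.date DESC' },
            est => { date_a => 'date NUMA',      date_d => 'date NUMD'       },
        },
    };

    sub getSort {
        my ( $part, $type, $sort_order ) = @_;
        die "unknown part: $part" unless $sort_def->{$part};
        die "unknown type: $type" unless $sort_def->{$part}{$type};

        $sort_order ||= $sort_def->{$part}{'default'};

        return $sort_def->{$part}{$type}{$sort_order}
            || $sort_def->{$part}{$type}{ $sort_def->{$part}{'default'} };
    }

    print getSort( 'search', 'sql', 'date_d' ), "\n";   # files.date DESC
    print getSort( 'search', 'est', undef    ), "\n";   # date NUMA (the default)
    print getSort( 'search', 'sql', 'bogus'  ), "\n";   # falls back to files.date ASC
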