X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=blobdiff_plain;f=lib%2FBackupPC%2FSearch.pm;h=a6f01ff5b5e3e92d677021340ec93c5739273abe;hp=479b51d70546c13f5eba2781b45c02f68ed2b7a4;hb=c615e1bfc6f2b0604a2658996f5dd6b6cfa9469c;hpb=aff621b091597104904eafcb4ad6d9fabc2bf7bb diff --git a/lib/BackupPC/Search.pm b/lib/BackupPC/Search.pm index 479b51d..a6f01ff 100644 --- a/lib/BackupPC/Search.pm +++ b/lib/BackupPC/Search.pm @@ -12,6 +12,10 @@ use XML::Writer; use IO::File; use Data::Dump qw(dump); +require Exporter; +our @ISA=qw(Exporter); +our @EXPORT=qw(unit); + my $on_page = 100; my $pager_pages = 10; @@ -28,7 +32,7 @@ sub search_module { if ( $@ ) { warn "ERROR: $search_module: $!"; } else { - warn "# using $search_module for full-text search"; + #warn "# using $search_module for full-text search"; } return $search_module->new( %Conf ); @@ -75,31 +79,28 @@ sub dates_from_form($) { sub mk_epoch_date($$) { my ($name,$suffix) = @_; - my $yyyy = $param->{ $name . '_year_' . $suffix} || return undef; - my $mm .= $param->{ $name . '_month_' . $suffix} || - ( $suffix eq 'from' ? 1 : 12); - my $dd .= $param->{ $name . '_day_' . $suffix} || - ( $suffix eq 'from' ? 1 : 31); + my $yyyy = $param->{ $name . '_year_' . $suffix} || return undef; + my $mm = $param->{ $name . '_month_' . $suffix}; + my $dd = $param->{ $name . '_day_' . $suffix}; $yyyy =~ s/\D//g; - $mm =~ s/\D//g; - $dd =~ s/\D//g; - - my $h = my $m = my $s = 0; - if ($suffix eq 'to') { - $h = 23; - $m = 59; - $s = 59; - } + $mm =~ s/\D//g; + $dd =~ s/\D//g; my $dt = new DateTime( - year => $yyyy, - month => $mm, - day => $dd, - hour => $h, - minute => $m, - second => $s, + year => $yyyy, + month => $mm || 1, + day => $dd || 1, + hour => 0, + minute => 0, + second => 0, ); + if ( $suffix eq 'to' && ( ! $mm || ! $dd ) ) { + $dt += DateTime::Duration->new( years => 1 ) if ! $mm; + $dt += DateTime::Duration->new( months => 1 ) if ! $dd; + $dt -= DateTime::Duration->new( days => 1 ); + } + print STDERR "mk_epoch_date($name,$suffix) [$yyyy-$mm-$dd] = " . $dt->ymd . " " . $dt->hms . "\n"; return $dt->epoch || 'NULL'; } @@ -132,12 +133,7 @@ sub getWhere($) { push( @conditions, ' files.shareid = ' . $param->{'search_share'} ) if ($param->{'search_share'}); push (@conditions, " upper(files.path) LIKE upper('%".$param->{'search_filename'}."%')") if ($param->{'search_filename'}); - if ( $param->{burned} ) { - my $is_what = 'is null'; - $is_what = '= 1' if ($param->{burned} eq 'burned'); - push @conditions, "archive_burned.part $is_what"; - push @conditions, "archive_burned.copy $is_what"; - } + push @conditions, join(' ' , 'burned is', $param->{burned} eq 'burned' ? '' : 'not', 'true') if $param->{burned}; return join(" and ", @conditions); } @@ -225,43 +221,31 @@ sub getFiles($) { # do we have to add tables for burned media? if ( $param->{burned} ) { $sql_from .= qq{ - LEFT OUTER JOIN archive_backup on archive_backup.backup_id = backups.id - LEFT OUTER JOIN archive_burned on archive_burned.archive_id = archive_backup.archive_id + LEFT OUTER JOIN backups_burned on backup_id = backups.id }; } my $order = getSort('search', 'sql', $param->{'sort'}); + # XXX LIMIT $on_page doesn't work since we don't get correct number of results my $sql_order = qq{ ORDER BY $order - LIMIT $on_page OFFSET ? 
}; - my $sql_count = qq{ select count(files.id) $sql_from $sql_where }; my $sql_results = qq{ select $sql_cols $sql_from $sql_where $sql_order }; - - my $sth = $dbh->prepare($sql_count); - $sth->execute(); - my ($results) = $sth->fetchrow_array(); - - $sth = $dbh->prepare($sql_results); - $sth->execute( $offset ); - - if ($sth->rows != $results) { - my $bug = "$0 BUG: [[ $sql_count ]] = $results while [[ $sql_results ]] = " . $sth->rows; - $bug =~ s/\s+/ /gs; - print STDERR "$bug\n"; - } + my $sth = $dbh->prepare($sql_results); + my $rows = $sth->execute( $offset ); my @ret; - + while (my $row = $sth->fetchrow_hashref()) { push @ret, $row; + last if $#ret + 1 >= $on_page; } $sth->finish(); - return ($results, \@ret); + return ($rows, \@ret); } sub getFilesHyperEstraier($) { @@ -370,36 +354,44 @@ sub getGzipSize($$) ); } -sub getVolumes($) { - my $id = shift; - - my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize"; - - my $sth = $dbh->prepare(qq{ +sub host_backup_nums { + my $host = shift; + my $sth = get_dbh->prepare(qq{ select - size - from backup_parts - where backup_id = ? - order by part_nr asc + hosts.name as host, -- FIXME for debug + backups.num as num, + inc_size, + size, + inc_deleted + from backups + join hosts on hosts.id = hostid + where hosts.name = ? }); - - $sth->execute($id); - - my $cumulative_size = 0; - my $volumes = 1; - - while(my ($size) = $sth->fetchrow_array) { - if ($cumulative_size + $size > $max_archive_size) { - $volumes++; - $cumulative_size = $size; - } else { - $cumulative_size += $size; - } + $sth->execute($host); + # and inc_size < 0 and size > 0 and not inc_deleted + + my $all_backup_numbers; + + while( my $row = $sth->fetchrow_hashref ) { +warn "# row ",dump $row; + $all_backup_numbers->{ $row->{num} } = + $row->{inc_deleted} ? 0 : + $row->{size} == 0 ? 0 : + $row->{inc_size} > 0 ? 0 : + $row->{size} > 0 ? 1 : + 0; } - return ($volumes,$cumulative_size); +warn "# host $host all_backup_numbers = ",dump($all_backup_numbers); + my @backup_nums = + sort { $a <=> $b } + grep { $all_backup_numbers->{$_} } + keys %$all_backup_numbers; + + return @backup_nums; } + sub getBackupsNotBurned($) { my $param = shift; @@ -410,36 +402,27 @@ sub getBackupsNotBurned($) { print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . 
" burn sql order: $order\n"; my $sql = qq{ - SELECT - backups.hostID AS hostID, - hosts.name AS host, - shares.name AS share, - backups.num AS backupnum, - backups.type AS type, - backups.date AS date, - date_part('epoch',now()) - backups.date as age, - backups.size AS size, - backups.id AS id, - backups.inc_size AS inc_size, - backups.parts AS parts - FROM backups - INNER JOIN shares ON backups.shareID=shares.ID - INNER JOIN hosts ON backups.hostID = hosts.ID - LEFT OUTER JOIN archive_backup ON archive_backup.backup_id = backups.id - WHERE backups.inc_size > 0 AND backups.size > 0 AND backups.inc_deleted is false AND archive_backup.backup_id IS NULL AND backups.parts > 0 - GROUP BY - backups.hostID, - hosts.name, - shares.name, - backups.num, - backups.shareid, - backups.id, - backups.type, - backups.date, - backups.size, - backups.inc_size, - backups.parts - ORDER BY $order + SELECT + p.id, + p.filename, + b.date, + date_part('epoch',now()) - b.date as age, + p.size, + count(ap.*) as scheduled, + count(ab.*) as burned + FROM backup_parts p + JOIN backups b ON b.id = p.backup_id + LEFT OUTER JOIN archive_parts ap ON ap.backup_part_id = p.id + LEFT OUTER JOIN archive_burned ab ON ab.archive_id = ap.archive_id + GROUP BY p.id,filename,b.date,age,p.size,p.part_nr + }; + + $sql .= qq{ + HAVING count(ap.*) = 0 + } unless $param->{scheduled}; + + $sql .= qq{ + ORDER BY b.date,p.part_nr }; my $sth = $dbh->prepare( $sql ); my @ret; @@ -448,17 +431,6 @@ print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql orde while ( my $row = $sth->fetchrow_hashref() ) { $row->{'age'} = sprintf("%0.1f", ( $row->{'age'} / 86400 ) ); #$row->{'age'} = sprintf("%0.1f", ( (time() - $row->{'date'}) / 86400 ) ); - - my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize"; - if ($row->{size} > $max_archive_size) { - ($row->{volumes}, $row->{inc_size_calc}) = getVolumes($row->{id}); - } - - $row->{size} = sprintf("%0.2f", $row->{size} / 1024 / 1024); - - # do some cluster calculation (approximate) - $row->{inc_size} = int(( ($row->{inc_size} + 1023 ) / 2 ) * 2); - $row->{inc_size_calc} ||= $row->{inc_size}; push @ret, $row; } @@ -470,652 +442,654 @@ sub displayBackupsGrid($) { my $param = shift; my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize"; - my $max_archive_file_size = $Conf{ArchiveChunkSize} || die "no MaxFileInSize"; - my $retHTML .= q{ -
-	};
-	$retHTML .= <<'EOF3';
-	[ ... old burn form: "Size: kB" estimate, progress bar starting at "0%", "Note:" field; the HTML inside this heredoc was not preserved in this copy of the diff ... ]
-EOF3
-	$retHTML .= q{
-	[ ... old results-table header markup; not preserved ... ]
-	} .
-	sort_header($param, 'Share', 'share', 'center') .
-	sort_header($param, '#', 'num', 'center') .
-	qq{
-	[ ... "Type" and "medias" header cells; markup not preserved ... ]
-	} .
-	sort_header($param, 'Date', 'date', 'center') .
-	sort_header($param, 'Age/days', 'age', 'center') .
-	sort_header($param, 'Size/Mb', 'size', 'center') .
-	sort_header($param, 'gzip size/Kb', 'incsize', 'center') .
-	qq{
-	[ ... ]
-	};
-
-	my @color = (' bgcolor="#e0e0e0"', '');
-
-	my $i = 0;
-	my $host = '';
-
-	foreach my $backup ( getBackupsNotBurned($param) ) {
-
-		if ($host ne $backup->{'host'}) {
-			$i++;
-			$host = $backup->{'host'};
-		}
-
-		my $ftype = "";
-
-		my $checkbox_key = $backup->{'hostid'}. '_' .$backup->{'backupnum'} . '_' . $backup->{'id'};
-
-		$retHTML .= '[ ... row and checkbox markup keyed on $checkbox_key; not preserved ... ]';
-
-		if (($backup->{'inc_size'} || 0) > 0) {
-			$retHTML .= '[ ... checkbox input markup; not preserved ... ]';
-		}
-
-		my $img_url = $Conf{CgiImageDirURL};
-
-		$retHTML .=
-			'[...]' . $backup->{'host'} . ':' . $backup->{'share'} .
-			'[...]' . $backup->{'backupnum'} .
-			'[...]' . $backup->{'type'} .
-			'[...]' . epoch_to_iso( $backup->{'date'} ) .
-			'[...]' . $backup->{'age'} .
-			'[...]' . $backup->{'size'} .
-			'[...]' . sprintf("%0.1f", $backup->{'inc_size'} / 1024 ) .
-			'[...]' . ( qq{media} x $backup->{volumes} ) . '[...]' .
-			"\n";
-	}
-
-	$retHTML .= "[ ... closing table markup; not preserved ... ]";
+	my $retHTML .= qq|
+	[ ... form and JavaScript for the new burn-media page; the markup in this block was not preserved in this copy of the diff ... ]
+
+	# all variables which has to be transfered
+	foreach my $n (qw/search_day_from search_month_from search_year_from search_day_to search_month_to search_year_to search_backup_day_from search_backup_month_from search_backup_year_from search_backup_day_to search_backup_month_to search_backup_year_to search_filename offset/) {
+		$retHTML .= qq{[ ... hidden form field for $n; markup not preserved ... ]\n};
+	}
+
+	my $del = '';
+	my $max_page = int( $results / $on_page );
+	my $page = 0;
+
+	sub page_uri($) {
+		my $param = shift || die "no param?";
+
+		my $uri = $MyURL;
+		my $del = '?';
+		foreach my $k (keys %{ $param }) {
+			if ($param->{$k}) {
+				$uri .= $del . $k . '=' . ${EscURI( $param->{$k} )};
+				$del = '&';
+			}
+		}
+		return $uri;
+	}
+
+	sub page_link($$$) {
+		my ($param,$page,$display) = @_;
+
+		$param->{'offset'} = $page if (defined($page));
+
+		my $html = '[...]' . $display . '[...]';
+	}
+
+	$retHTML .= '[ ... pager container markup; not preserved ... ]';
+
+	if ($offset > 0) {
+		$retHTML .= page_link($param, $offset - 1, '<<') . ' ';
+	}
+
+	while ($page <= $max_page) {
+		if ($page == $offset) {
+			$retHTML .= $del . '[...]' . ($page + 1) . '[...]';
+		} else {
+			$retHTML .= $del . page_link($param, $page, $page + 1);
+		}
+
+		if ($page < $offset - $pager_pages && $page != 0) {
+			$retHTML .= " ... ";
+			$page = $offset - $pager_pages;
+			$del = '';
+		} elsif ($page > $offset + $pager_pages && $page != $max_page) {
+			$retHTML .= " ... ";
+			$page = $max_page;
+			$del = '';
+		} else {
+			$del = ' | ';
+			$page++;
+		}
+	}
+
+	if ($offset < $max_page) {
+		$retHTML .= ' ' . page_link($param, $offset + 1, '>>');
+	}
+
+	$retHTML .= "[ ... closing markup; not preserved ... ]";
 
 	return $retHTML;
 
-}
+}
 
-sub displayGrid($) {
-	my ($param) = @_;
+my @units = qw/b k M G/;
+sub unit {
+	my $v = shift;
 
-	my $offset = $param->{'offset'};
-	my $hilite = $param->{'search_filename'};
+	my $o = 0;
 
-	my $retHTML = "";
-
-	my $start_t = time();
+	while ( ( $v / 10000 ) >= 1 ) {
+		$o++;
+		$v /= 1024;
+	}
 
-	my ($results, $files);
-	if ($param->{'use_hest'} && length($hilite) > 0) {
-		($results, $files) = getFilesHyperEstraier($param);
+	if ( $v >= 1 ) {
+		return sprintf("%d%s", $v, $units[$o]);
+	} elsif ( $v == 0 ) {
+		return 0;
 	} else {
-		($results, $files) = getFiles($param);
+		return sprintf("%.1f%s", $v, $units[$o]);
 	}
+}
 
-	my $dur_t = time() - $start_t;
-	my $dur = sprintf("%0.4fs", $dur_t);
+1;
 
-	my ($from, $to) = (($offset * $on_page) + 1, ($offset * $on_page) + $on_page);
+__DATA__
 
-	if ($results <= 0) {
-		$retHTML .= qq{
-			[ ... "No results found..." message markup; not preserved ... ]
-		};
-		return $retHTML;
-	} else {
-		# DEBUG
-		#use Data::Dumper;
-		#$retHTML .= '[...]' . Dumper($files) . '[...]';
-	}
 
-	[ ... old results summary and table markup from displayGrid; not preserved in this copy ... ]
 
-	if ($offset > 0) {
-		$retHTML .= page_link($param, $offset - 1, '<<') . ' ';
-	}
 
-	while ($page <= $max_page) {
-		if ($page == $offset) {
-			$retHTML .= $del . '[...]' . ($page + 1) . '[...]';
-		} else {
-			$retHTML .= $del . page_link($param, $page, $page + 1);
-		}
 
-		if ($page < $offset - $pager_pages && $page != 0) {
-			$retHTML .= " ... ";
-			$page = $offset - $pager_pages;
-			$del = '';
-		} elsif ($page > $offset + $pager_pages && $page != $max_page) {
-			$retHTML .= " ... ";
-			$page = $max_page;
-			$del = '';
-		} else {
-			$del = ' | ';
-			$page++;
-		}
-	}
 
-	if ($offset < $max_page) {
-		$retHTML .= ' ' . page_link($param, $offset + 1, '>>');
-	}
 
-	$retHTML .= "[...]";
 
-	return $retHTML;
-}
 
+[ ... __DATA__ HTML/JavaScript template for the burn-media page: form with a "Size: kB" estimate, progress bar starting at "0%" and a "Note:" field; the markup itself was not preserved in this copy of the diff ... ]
 
-1;