X-Git-Url: http://git.rot13.org/?a=blobdiff_plain;f=lib%2FBackupPC%2FSearch.pm;h=a0e19c307e33d49ae8f2cdfe17c0bdfea941f0fe;hb=796b15687f42564a63e80496b2c0daa641e7567d;hp=8fe2541cccd21f326605d79b70dc4ef598f4edbc;hpb=be11cc4fd68cb4f99554d1d26233d52a854ed7f4;p=BackupPC.git

diff --git a/lib/BackupPC/Search.pm b/lib/BackupPC/Search.pm
index 8fe2541..a0e19c3 100644
--- a/lib/BackupPC/Search.pm
+++ b/lib/BackupPC/Search.pm
@@ -1,5 +1,5 @@
 #!/usr/bin/perl
-package BackupPC::SearchLib;
+package BackupPC::Search;
 use strict;
 use BackupPC::CGI::Lib qw(:all);
@@ -10,22 +10,38 @@ use vars qw(%In $MyURL);
 use Time::HiRes qw/time/;
 use XML::Writer;
 use IO::File;
-use BackupPC::Search::Estraier;
+use Data::Dump qw(dump);
+
+require Exporter;
+our @ISA    = qw(Exporter);
+our @EXPORT = qw(unit);
 
 my $on_page = 100;
 my $pager_pages = 10;
 
-my $dsn = $Conf{SearchDSN};
-my $db_user = $Conf{SearchUser} || '';
+my $bpc = BackupPC::Lib->new || die;
+$bpc->ConfigRead('_search_archive');
+my %Conf = $bpc->Conf();
 
-my $hest_node_url = $Conf{HyperEstraierIndex};
-
-my $dbh;
+sub search_module {
+    my $search_module = $Conf{SearchModule} || die "search is disabled";
+    eval "use $search_module";
+    if ( $@ ) {
+        warn "ERROR: $search_module: $@";
+    } else {
+        warn "# using $search_module for full-text search";
+    }
+    return $search_module->new( %Conf );
+}
 
+my $dbh;
 sub get_dbh {
-    $dbh ||= DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } );
+    $dbh ||= DBI->connect($Conf{SearchDSN}, $Conf{SearchUser}, "", { RaiseError => 1, AutoCommit => 1 } );
     return $dbh;
 }
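
The new search_module() factory makes the full-text backend pluggable: $Conf{SearchModule} names a class, which is loaded with a string eval and constructed with the whole configuration hash. The bareword call search_module->search( ... ) in getFilesHyperEstraier() further down works because Perl resolves a bareword before -> through an already-declared sub of that name, so the method runs on the factory's return value. Below is a minimal sketch of what such a backend has to provide; the package name is hypothetical, and the (hit count, rows) return shape is an assumption rather than something this diff specifies.

    # Hypothetical no-op backend, for illustration only.
    package BackupPC::Search::Null;
    use strict;
    use warnings;

    sub new {
        my ( $class, %Conf ) = @_;    # search_module() passes the full %Conf
        return bless { conf => \%Conf }, $class;
    }

    # Positional arguments exactly as getFilesHyperEstraier() passes them.
    sub search {
        my ( $self, $offset, $on_page, $sort, $q, $shareid,
             $backup_from, $backup_to, $files_from, $files_to ) = @_;
        return ( 0, [] );             # assumed (hit count, rows) shape: no hits
    }

    1;

With $Conf{SearchModule} = 'BackupPC::Search::Null'; in the _search_archive config, search_module() would load and construct this class instead of the Hyper Estraier backend.
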
@@ -134,12 +150,12 @@ my $sort_def = {
     search => {
         default => 'date_a',
         sql => {
-            share_d => 'shares.name DESC',
-            share_a => 'shares.name ASC',
-            path_d => 'files.path DESC',
-            path_a => 'files.path ASC',
-            num_d => 'files.backupnum DESC',
-            num_a => 'files.backupnum ASC',
+            sname_d => 'shares.name DESC',
+            sname_a => 'shares.name ASC',
+            filepath_d => 'files.path DESC',
+            filepath_a => 'files.path ASC',
+            backupnum_d => 'files.backupnum DESC',
+            backupnum_a => 'files.backupnum ASC',
             size_d => 'files.size DESC',
             size_a => 'files.size ASC',
             date_d => 'files.date DESC',
@@ -148,8 +164,8 @@ my $sort_def = {
     }, burn => {
         default => 'date_a',
         sql => {
-            share_d => 'host DESC, share DESC',
-            share_a => 'host ASC, share ASC',
+            sname_d => 'host DESC, share DESC',
+            sname_a => 'host ASC, share ASC',
             num_d => 'backupnum DESC',
             num_a => 'backupnum ASC',
             date_d => 'date DESC',
@@ -262,7 +278,7 @@ sub getFilesHyperEstraier($) {
     my $shareid = $param->{'search_share'};
     my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param);
 
-    return BackupPC::Search::Estraier->new( $hest_node_url )->search(
+    return search_module->search(
         $offset, $on_page, $param->{sort},
         $q, $shareid, $backup_from, $backup_to, $files_from, $files_to
     );
@@ -288,10 +304,9 @@ sub getGzipName($$$)
 sub get_tgz_size_by_name($) {
     my $name = shift;
 
-    my $tgz = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}.'/'.$name;
+    my $tgz = $Conf{GzipTempDir}.'/'.$name;
     my $size = -1;
 
-    my $Dir = $Conf{InstallDir}."/data/log";
     $|=1;
     if (-f "${tgz}.tar.gz") {
         $size = (stat("${tgz}.tar.gz"))[7];
@@ -359,35 +374,6 @@ sub getGzipSize($$)
     );
 }
 
-sub getVolumes($) {
-    my $id = shift;
-
-    my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
-
-    my $sth = $dbh->prepare(qq{
-        select
-            size
-        from backup_parts
-        where backup_id = ?
-        order by part_nr asc
-    });
-
-    $sth->execute($id);
-
-    my $cumulative_size = 0;
-    my $volumes = 1;
-
-    while(my ($size) = $sth->fetchrow_array) {
-        if ($cumulative_size + $size > $max_archive_size) {
-            $volumes++;
-            $cumulative_size = $size;
-        } else {
-            $cumulative_size += $size;
-        }
-    }
-
-    return ($volumes,$cumulative_size);
-}
 
 sub getBackupsNotBurned($) {
 
@@ -399,36 +385,27 @@ sub getBackupsNotBurned($) {
 print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql order: $order\n";
     my $sql = qq{
-        SELECT
-            backups.hostID AS hostID,
-            hosts.name AS host,
-            shares.name AS share,
-            backups.num AS backupnum,
-            backups.type AS type,
-            backups.date AS date,
-            date_part('epoch',now()) - backups.date as age,
-            backups.size AS size,
-            backups.id AS id,
-            backups.inc_size AS inc_size,
-            backups.parts AS parts
-        FROM backups
-        INNER JOIN shares ON backups.shareID=shares.ID
-        INNER JOIN hosts ON backups.hostID = hosts.ID
-        LEFT OUTER JOIN archive_backup ON archive_backup.backup_id = backups.id
-        WHERE backups.inc_size > 0 AND backups.size > 0 AND backups.inc_deleted is false AND archive_backup.backup_id IS NULL AND backups.parts > 0
-        GROUP BY
-            backups.hostID,
-            hosts.name,
-            shares.name,
-            backups.num,
-            backups.shareid,
-            backups.id,
-            backups.type,
-            backups.date,
-            backups.size,
-            backups.inc_size,
-            backups.parts
-        ORDER BY $order
+        SELECT
+            p.id,
+            p.filename,
+            b.date,
+            date_part('epoch',now()) - b.date as age,
+            p.size,
+            count(ap.*) as scheduled,
+            count(ab.*) as burned
+        FROM backup_parts p
+        JOIN backups b ON b.id = p.backup_id
+        LEFT OUTER JOIN archive_parts ap ON ap.backup_part_id = p.id
+        LEFT OUTER JOIN archive_burned ab ON ab.archive_id = ap.archive_id
+        GROUP BY p.id,filename,b.date,age,p.size,p.part_nr
+    };
+
+    $sql .= qq{
+        HAVING count(ap.*) = 0
+    } unless $param->{scheduled};
+
+    $sql .= qq{
+        ORDER BY b.date,p.part_nr
+    };
     my $sth = $dbh->prepare( $sql );
     my @ret;
@@ -437,17 +414,6 @@ print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql orde
     while ( my $row = $sth->fetchrow_hashref() ) {
         $row->{'age'} = sprintf("%0.1f", ( $row->{'age'} / 86400 ) );
         #$row->{'age'} = sprintf("%0.1f", ( (time() - $row->{'date'}) / 86400 ) );
-
-        my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
-        if ($row->{size} > $max_archive_size) {
-            ($row->{volumes}, $row->{inc_size_calc}) = getVolumes($row->{id});
-        }
-
-        $row->{size} = sprintf("%0.2f", $row->{size} / 1024 / 1024);
-
-        # do some cluster calculation (approximate)
-        $row->{inc_size} = int(( ($row->{inc_size} + 1023 ) / 2 ) * 2);
-        $row->{inc_size_calc} ||= $row->{inc_size};
         push @ret, $row;
     }
@@ -458,653 +424,655 @@ sub displayBackupsGrid($) {
 
     my $param = shift;
 
-    my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
-    my $max_archive_file_size = $Conf{MaxArchiveFileSize} || die "no MaxFileInSize";
+    my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
 
-    my $retHTML .= q{
-        [form wrapper markup lost in extraction]
-    };
+    my $retHTML .= qq|
+        [form wrapper markup lost in extraction]
+    |;
 
-    $retHTML .= <<'EOF3';
-        [inline "fixedBox" burn-form template lost in extraction: total
-        size field ("Size: ... kB"), progress indicator ("0%"), volumes
-        list, "Note:" textarea, submit button and supporting JavaScript]
-EOF3

[the several hundred remaining template and grid-rendering lines of this
hunk, on both the removed and the added side, did not survive extraction;
the recoverable tail of the hunk follows]

-    if ($offset > 0) {
-        $retHTML .= page_link($param, $offset - 1, '<<') . ' ';
-    }
-
-    while ($page <= $max_page) {
-        if ($page == $offset) {
-            $retHTML .= $del . '<b>' . ($page + 1) . '</b>';
-        } else {
-            $retHTML .= $del . page_link($param, $page, $page + 1);
-        }
-
-        if ($page < $offset - $pager_pages && $page != 0) {
-            $retHTML .= " ... ";
-            $page = $offset - $pager_pages;
-            $del = '';
-        } elsif ($page > $offset + $pager_pages && $page != $max_page) {
-            $retHTML .= " ... ";
-            $page = $max_page;
-            $del = '';
-        } else {
-            $del = ' | ';
-            $page++;
-        }
-    }
-
-    if ($offset < $max_page) {
-        $retHTML .= ' ' . page_link($param, $offset + 1, '>>');
-    }
-
-    $retHTML .= "</div>";
-
-    return $retHTML;
-}
-
-1;
 
+my @units = qw/b k M G/;
+sub unit {
+    my $v = shift;
+
+    my $o = 0;
+
+    while ( ( $v / 10000 ) >= 1 ) {
+        $o++;
+        $v /= 1024;
+    }
+
+    if ( $v >= 1 ) {
+        return sprintf("%d%s", $v, $units[$o]);
+    } elsif ( $v == 0 ) {
+        return 0;
+    } else {
+        return sprintf("%.1f%s", $v, $units[$o]);
+    }
+}
+
+1;
+
+__DATA__
+
+    [new "fixedBox" template lost in extraction: the same size field,
+    progress indicator ("0%"), volumes list, "Note:" textarea and submit
+    button, now served from the module's __DATA__ section instead of
+    being built inline in displayBackupsGrid()]