X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=blobdiff_plain;f=lib%2FBackupPC%2FSearch.pm;h=f2dac44032b8a661042a284e18dbeedc25c63b8e;hp=742f3f2716af1d030226d030d0792bf372f1f777;hb=1f12f5ba25530a04f3a5e7b86c3ac88d9526fe1d;hpb=c895c85bd282936cd38d900904bd94734fdc8248

diff --git a/lib/BackupPC/Search.pm b/lib/BackupPC/Search.pm
index 742f3f2..f2dac44 100644
--- a/lib/BackupPC/Search.pm
+++ b/lib/BackupPC/Search.pm
@@ -10,22 +10,38 @@ use vars qw(%In $MyURL);
 use Time::HiRes qw/time/;
 use XML::Writer;
 use IO::File;
-use BackupPC::Search::Estraier;
+use Data::Dump qw(dump);
+
+require Exporter;
+our @ISA=qw(Exporter);
+our @EXPORT=qw(unit);
 
 my $on_page = 100;
 my $pager_pages = 10;
 
-my $dsn = $Conf{SearchDSN};
-my $db_user = $Conf{SearchUser} || '';
+my $dbh;
 
-my $hest_node_url = $Conf{HyperEstraierIndex};
+my $bpc = BackupPC::Lib->new || die;
+$bpc->ConfigRead('_search_archive');
+my %Conf = $bpc->Conf();
 
-my $dbh;
+sub search_module {
+
+    my $search_module = $Conf{SearchModule} || die "search is disabled";
+    eval "use $search_module";
+    if ( $@ ) {
+        warn "ERROR: $search_module: $!";
+    } else {
+        warn "# using $search_module for full-text search";
+    }
+    return $search_module->new( %Conf );
+}
 
+my $dbh;
 sub get_dbh {
-    $dbh ||= DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } );
+    $dbh ||= DBI->connect($Conf{SearchDSN}, $Conf{SearchUser}, "", { RaiseError => 1, AutoCommit => 1 } );
     return $dbh;
 }
@@ -134,12 +150,12 @@ my $sort_def = {
     search => {
         default => 'date_a',
         sql => {
-            share_d => 'shares.name DESC',
-            share_a => 'shares.name ASC',
-            path_d => 'files.path DESC',
-            path_a => 'files.path ASC',
-            num_d => 'files.backupnum DESC',
-            num_a => 'files.backupnum ASC',
+            sname_d => 'shares.name DESC',
+            sname_a => 'shares.name ASC',
+            filepath_d => 'files.path DESC',
+            filepath_a => 'files.path ASC',
+            backupnum_d => 'files.backupnum DESC',
+            backupnum_a => 'files.backupnum ASC',
             size_d => 'files.size DESC',
             size_a => 'files.size ASC',
             date_d => 'files.date DESC',
@@ -148,8 +164,8 @@
     }, burn => {
         default => 'date_a',
         sql => {
-            share_d => 'host DESC, share DESC',
-            share_a => 'host ASC, share ASC',
+            sname_d => 'host DESC, share DESC',
+            sname_a => 'host ASC, share ASC',
             num_d => 'backupnum DESC',
             num_a => 'backupnum ASC',
             date_d => 'date DESC',
@@ -262,7 +278,7 @@ sub getFilesHyperEstraier($) {
     my $shareid = $param->{'search_share'};
     my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param);
 
-    return BackupPC::Search::Estraier->new( $hest_node_url )->search(
+    return search_module->search(
         $offset, $on_page, $param->{sort},
         $q, $shareid, $backup_from, $backup_to, $files_from, $files_to
     );
@@ -288,10 +304,9 @@ sub getGzipName($$$)
 
 sub get_tgz_size_by_name($) {
     my $name = shift;
-    my $tgz = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}.'/'.$name;
+    my $tgz = $Conf{GzipTempDir}.'/'.$name;
     my $size = -1;
 
-    my $Dir = $Conf{InstallDir}."/data/log";
     $|=1;
     if (-f "${tgz}.tar.gz") {
         $size = (stat("${tgz}.tar.gz"))[7];
@@ -362,7 +377,7 @@ sub getGzipSize($$)
 sub getVolumes($) {
     my $id = shift;
 
-    my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
+    my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
 
     my $sth = $dbh->prepare(qq{
         select
@@ -438,13 +453,11 @@ print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql orde
         $row->{'age'} = sprintf("%0.1f", ( $row->{'age'} / 86400 ) );
         #$row->{'age'} = sprintf("%0.1f", ( (time() - $row->{'date'}) / 86400 ) );
 
-        my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
+        my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
         if ($row->{size} > $max_archive_size) {
             ($row->{volumes}, $row->{inc_size_calc}) = getVolumes($row->{id});
         }
 
-        $row->{size} = sprintf("%0.2f", $row->{size} / 1024 / 1024);
-
         # do some cluster calculation (approximate)
         $row->{inc_size} = int(( ($row->{inc_size} + 1023 ) / 2 ) * 2);
         $row->{inc_size_calc} ||= $row->{inc_size};
@@ -458,8 +471,8 @@
 sub displayBackupsGrid($) {
 
     my $param = shift;
 
-    my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
-    my $max_archive_file_size = $Conf{MaxArchiveFileSize} || die "no MaxFileInSize";
+    my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
+    my $max_archive_file_size = $Conf{ArchiveChunkSize} || die "no MaxFileInSize";
 
     my $retHTML .= q{
@@ -817,8 +830,8 @@ EOF3
     } .
     sort_header($param, 'Date', 'date', 'center') .
     sort_header($param, 'Age/days', 'age', 'center') .
-    sort_header($param, 'Size/Mb', 'size', 'center') .
-    sort_header($param, 'gzip size/Kb', 'incsize', 'center') .
+    sort_header($param, 'Size', 'size', 'center') .
+    sort_header($param, 'gzip size', 'incsize', 'center') .
     qq{
         medias
     };
@@ -856,8 +869,8 @@ EOF3
         '' . $backup->{'type'} . '' .
         '' . epoch_to_iso( $backup->{'date'} ) . '' .
         '' . $backup->{'age'} . '' .
-        '' . $backup->{'size'} . '' .
-        '' . sprintf("%0.1f", $backup->{'inc_size'} / 1024 ) .
+        '' . unit($backup->{'size'}) . '' .
+        '' . unit($backup->{'inc_size'}) . '' .
         '' .
         '' .
         '' . ( qq{media} x $backup->{volumes} ) . '' .
@@ -952,9 +965,9 @@ sub displayGrid($) {
     }
 
     $retHTML .=
-        sort_header($param, 'Share', 'share', 'center') .
-        sort_header($param, 'Type and Name', 'path', 'center') .
-        sort_header($param, '#', 'num', 'center') .
+        sort_header($param, 'Share', 'sname', 'center') .
+        sort_header($param, 'Type and Name', 'filepath', 'center') .
+        sort_header($param, '#', 'backupnum', 'center') .
         sort_header($param, 'Size', 'size', 'center') .
         sort_header($param, 'Date', 'date', 'center');
 
@@ -1107,4 +1120,24 @@ sub displayGrid($) {
     return $retHTML;
 }
 
+my @units = qw/b k M G/;
+sub unit {
+    my $v = shift;
+
+    my $o = 0;
+
+    while ( ( $v / 10000 ) >= 1 ) {
+        $o++;
+        $v /= 1024;
+    }
+
+    if ( $v >= 1 ) {
+        return sprintf("%d%s", $v, $units[$o]);
+    } elsif ( $v == 0 ) {
+        return 0;
+    } else {
+        return sprintf("%.1f%s", $v, $units[$o]);
+    }
+}
+
 1;
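
For reference, a minimal standalone sketch of how the exported unit() helper from the last hunk behaves; it is what lets the grid drop the fixed "Size/Mb" and "gzip size/Kb" columns in favour of self-scaling values. The function body is reproduced from the hunk above; the driver loop and the sample byte counts are illustrative additions, not part of the commit.

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Reproduced from the commit: scale a raw byte count into b/k/M/G.
    my @units = qw/b k M G/;
    sub unit {
        my $v = shift;

        my $o = 0;

        while ( ( $v / 10000 ) >= 1 ) {
            $o++;
            $v /= 1024;
        }

        if ( $v >= 1 ) {
            return sprintf("%d%s", $v, $units[$o]);
        } elsif ( $v == 0 ) {
            return 0;
        } else {
            return sprintf("%.1f%s", $v, $units[$o]);
        }
    }

    # Illustrative sample values (not from the commit):
    printf "%12d -> %s\n", $_, unit($_) for 0, 2048, 1234567, 3 * 1024**3;

    # prints:
    #            0 -> 0
    #         2048 -> 2048b
    #      1234567 -> 1205k
    #   3221225472 -> 3072M

Note that the scaling threshold is 10000 rather than 1024, so a value keeps up to four digits before moving to the next unit: 2048 stays as "2048b" and 3 GB prints as "3072M" rather than "3G".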