-sub getBackupsNotBurned() {
-
- my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 1 } );
- my $sql = q{
- SELECT
- hosts.ID AS hostid,
- min(hosts.name) AS host,
- backups.num AS backupno,
- min(backups.type) AS type,
- min(backups.date) AS date
- FROM backups, shares, files, hosts
- WHERE
- backups.num = files.backupNum AND
- shares.ID = files.shareID AND
- backups.hostID = shares.hostID AND
- hosts.ID = backups.hostID AND
- files.dvdid IS NULL
- GROUP BY
- backups.hostID, backups.num, hosts.id
+# Validate the configured Hyper Estraier URL ($use_hest).
+# Returns the URL unchanged when it points at an estmaster node
+# (http://...), returns nothing when full-text search is disabled,
+# and dies for legacy direct-database configurations.
+sub getHyperEstraier_url($) {
+	my ($use_hest) = @_;
+
+	# full-text search not configured
+	return unless $use_hest;
+
+	# compile-time load; runs once regardless of sub placement
+	use Search::Estraier 0.04;
+	# fixed typo: "datatase" -> "database"
+	die "direct access to Hyper Estraier database is no longer supported. Please use estmaster\n"
+		unless ($use_hest =~ m#^http://#);
+
+	return $use_hest;
+}
+
+# Full-text file search through a Hyper Estraier node ($hest_node_url).
+# $param is the CGI parameter hashref (search_filename, search_share,
+# offset, sort, plus the date-range fields read by dates_from_form).
+# Returns ($hits, \@rows): the total hit count and one page ($on_page
+# entries) of result rows, each a hashref keyed
+# fid/hname/sname/backupnum/filepath/date/type/size.
+sub getFilesHyperEstraier($) {
+	my ($param) = @_;
+
+	# 'offset' arrives as a page number; convert it to a result index
+	my $offset = $param->{'offset'} || 0;
+	$offset *= $on_page;
+
+	die "no Hyper Estraier node URL?" unless ($hest_node_url);
+
+	# open the database
+	my $db;
+	if ($hest_node_url) {
+		$db ||= Search::Estraier::Node->new($hest_node_url);
+		# NOTE(review): hard-coded admin/admin credentials -- confirm
+		# the estmaster node is not reachable from untrusted networks
+		$db->set_auth('admin', 'admin');
+	} else {
+		die "BUG: unimplemented";
+	}
+
+	# create a search condition object
+	my $cond = Search::Estraier::Condition->new();
+
+	my $q = $param->{'search_filename'};
+	my $shareid = $param->{'search_share'};
+
+	if (length($q) > 0) {
+		# exact match
+		$cond->add_attr("filepath ISTRINC $q");
+
+		# NOTE(review): spaces are inserted between characters,
+		# presumably to match the N-gram tokenisation used when the
+		# files were indexed -- confirm against the indexing side
+		$q =~ s/(.)/$1 /g;
+		# set the search phrase to the search condition object
+		$cond->set_phrase($q);
+	}
+
+	my ($backup_from, $backup_to, $files_from, $files_to) = dates_from_form($param);
+
+	# restrict by backup date and by file mtime, each bound optional
+	$cond->add_attr("backup_date NUMGE $backup_from") if ($backup_from);
+	$cond->add_attr("backup_date NUMLE $backup_to") if ($backup_to);
+
+	$cond->add_attr("date NUMGE $files_from") if ($files_from);
+	$cond->add_attr("date NUMLE $files_to") if ($files_to);
+
+	$cond->add_attr("shareid NUMEQ $shareid") if ($shareid);
+
+	# fetch everything up to the end of the requested page
+	$cond->set_max( $offset + $on_page );
+	$cond->set_options( 'SURE' );
+	$cond->set_order( getSort('search', 'est', $param->{'sort'} ) );
+
+	# get the result of search
+	my @res;
+	my ($result, $hits);
+
+	if ($hest_node_url) {
+		$result = $db->search($cond, 0);
+		if ($result) {
+			$hits = $result->hits;
+		} else {
+			# node unreachable or query failed: report zero hits
+			$hits = 0;
+			return ($hits,[]);
+		}
+	} else {
+		die "BUG: unimplemented";
+	}
+
+	# for each document in result -- only the slice for the current page
+	for my $i ($offset .. ($offset + $on_page - 1)) {
+		last if ($i >= $result->doc_num);
+
+		my $doc;
+		if ($hest_node_url) {
+			$doc = $result->get_doc($i);
+		} else {
+			die "BUG: unimplemented";
+		}
+
+		# copy the indexed attributes into a plain hashref row
+		my $row;
+		foreach my $c (qw/fid hname sname backupnum filepath date type size/) {
+			$row->{$c} = $doc->attr($c);
+		}
+		push @res, $row;
+	}
+
+	return ($hits, \@res);
+}
+
+# Expand the $Conf{GzipSchema} template into the on-disk archive name
+# for one backup: \h -> host, \s -> share (slashes flattened to '_'),
+# \n -> backup number.  Runs of '_' are collapsed so empty fields do
+# not leave '__' behind.
+sub getGzipName($$$)
+{
+	my ($host, $share, $backupnum) = @_;
+	my $ret = $Conf{GzipSchema};
+
+	# share names contain '/', which cannot appear in a file name
+	$share =~ s/\//_/g;
+	$ret =~ s/\\h/$host/ge;
+	$ret =~ s/\\s/$share/ge;
+	$ret =~ s/\\n/$backupnum/ge;
+
+	$ret =~ s/__+/_/g;
+
+	return $ret;
+
+}
+
+# Total size in bytes of the archive generated for $name under
+# $Conf{InstallDir}/$Conf{GzipTempDir}: either a single <name>.tar.gz
+# file or, for split archives, a directory whose part files are summed
+# (dot-files and md5 checksum files excluded).  Returns -1 when no
+# archive exists yet.
+sub get_tgz_size_by_name($) {
+	my $name = shift;
+
+	my $tgz = $Conf{InstallDir}.'/'.$Conf{GzipTempDir}.'/'.$name;
+
+	my $size = -1;
+
+	if (-f "${tgz}.tar.gz") {
+		# single-file archive
+		$size = (stat("${tgz}.tar.gz"))[7];
+	} elsif (-d $tgz) {
+		# split archive: sum the visible part files
+		opendir(my $dir, $tgz) || die "can't opendir $tgz: $!";
+		my @parts = grep { !/^\./ && !/md5/ && -f "$tgz/$_" } readdir($dir);
+		$size = 0;
+		foreach my $part (@parts) {
+			# NOTE(review): a legitimately zero-byte part would make
+			# this expression false and trigger the die -- confirm
+			# parts are always non-empty
+			$size += (stat("$tgz/$part"))[7] || die "can't stat $tgz/$part: $!";
+		}
+		closedir $dir;
+	} else {
+		return -1;
+	}
+
+	return $size;
+}
+
+# Size of the generated archive for one (host ID, backup number) pair:
+# looks the host/share/backup names up in the database and delegates to
+# get_tgz_size_by_name() / getGzipName().
+sub getGzipSize($$)
+{
+	my ($hostID, $backupNum) = @_;
+	my $sql;
+	my $dbh = get_dbh();
+
+	$sql = q{
+				SELECT hosts.name  as host,
+				       shares.name as share,
+				       backups.num as backupnum
+				FROM hosts, backups, shares
+				WHERE shares.id=backups.shareid AND
+				      hosts.id =backups.hostid AND
+				      hosts.id=? AND
+				      backups.num=?
+			};
+	my $sth = $dbh->prepare($sql);
+	$sth->execute($hostID, $backupNum);
+
+	# NOTE(review): if no row matches, $row stays undef and getGzipName()
+	# receives undefs -- confirm callers only pass existing backups
+	my $row = $sth->fetchrow_hashref();
+
+	return get_tgz_size_by_name(
+		getGzipName($row->{'host'}, $row->{share}, $row->{'backupnum'})
+	);
+}
+
+# Number of media volumes needed for backup $id: packs its parts in
+# part_nr order into volumes of at most $Conf{MaxArchiveSize} bytes
+# (first-fit, order preserved).  Returns ($volumes, $size) where $size
+# is the bytes accumulated in the last (current) volume.
+# NOTE(review): uses the file-global $dbh rather than get_dbh() as the
+# other subs in this patch do -- verify it is initialised by all callers.
+sub getVolumes($) {
+	my $id = shift;
+
+	my $max_archive_size = $Conf{MaxArchiveSize} || die "no MaxArchiveSize";
+
+	my $sth = $dbh->prepare(qq{
+		select
+			size
+		from backup_parts
+		where backup_id = ?
+		order by part_nr asc
+	});
+
+	$sth->execute($id);
+
+	my $cumulative_size = 0;
+	my $volumes = 1;
+
+	while(my ($size) = $sth->fetchrow_array) {
+		if ($cumulative_size + $size > $max_archive_size) {
+			# part does not fit: start a new volume holding just it
+			$volumes++;
+			$cumulative_size = $size;
+		} else {
+			$cumulative_size += $size;
+		}
+	}
+
+	return ($volumes,$cumulative_size);
+}
+
+sub getBackupsNotBurned($) {
+
+ my $param = shift;
+ my $dbh = get_dbh();
+
+ my $order = getSort('burn', 'sql', $param->{'sort'});
+
+print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql order: $order\n";
+
+ my $sql = qq{
+ SELECT
+ backups.hostID AS hostID,
+ hosts.name AS host,
+ shares.name AS share,
+ backups.num AS backupnum,
+ backups.type AS type,
+ backups.date AS date,
+ date_part('epoch',now()) - backups.date as age,
+ backups.size AS size,
+ backups.id AS id,
+ backups.inc_size AS inc_size,
+ backups.parts AS parts
+ FROM backups
+ INNER JOIN shares ON backups.shareID=shares.ID
+ INNER JOIN hosts ON backups.hostID = hosts.ID
+ LEFT OUTER JOIN archive_backup ON archive_backup.backup_id = backups.id
+ WHERE backups.inc_size > 0 AND backups.size > 0 AND backups.inc_deleted is false AND archive_backup.backup_id IS NULL AND backups.parts > 0
+ GROUP BY
+ backups.hostID,
+ hosts.name,
+ shares.name,
+ backups.num,
+ backups.shareid,
+ backups.id,
+ backups.type,
+ backups.date,
+ backups.size,
+ backups.inc_size,
+ backups.parts
+ ORDER BY $order