if ( $@ ) {
warn "ERROR: $search_module: $!";
} else {
- warn "# using $search_module for full-text search";
+ #warn "# using $search_module for full-text search";
}
return $search_module->new( %Conf );
# Build an epoch timestamp from the CGI date fields
# "<name>_{year,month,day}_<suffix>" in the global $param hashref.
# $suffix is 'from' (range start) or 'to' (range end).  A missing month/day
# defaults to the earliest sub-period (Jan / 1st); for a 'to' date the result
# is instead rounded up to the end of the open period (end of year when the
# month is missing, end of that month when only the day is missing).
# Returns epoch seconds ('NULL' if the epoch is 0), or undef when no year
# was supplied at all.
sub mk_epoch_date($$) {
	my ($name,$suffix) = @_;

	my $yyyy = $param->{ $name . '_year_' . $suffix} || return undef;
	my $mm = $param->{ $name . '_month_' . $suffix};
	my $dd = $param->{ $name . '_day_' . $suffix};

	# strip non-digit noise from the CGI values; month/day may be absent,
	# so guard against undef to avoid "uninitialized value" warnings
	$yyyy =~ s/\D//g;
	$mm =~ s/\D//g if defined $mm;
	$dd =~ s/\D//g if defined $dd;

	my $dt = DateTime->new(
		year	=> $yyyy,
		month	=> $mm || 1,
		day	=> $dd || 1,
		hour	=> 0,
		minute	=> 0,
		second	=> 0,
	);

	# Round an open-ended 'to' date up to the end of its period.  The two
	# cases must be exclusive: adding a year AND a month when both month
	# and day are missing would land on Jan 31 of the NEXT year instead
	# of Dec 31 of this one.
	if ( $suffix eq 'to' && ( ! $mm || ! $dd ) ) {
		if ( ! $mm ) {
			$dt += DateTime::Duration->new( years => 1 );	# no month: end of year
		} else {
			$dt += DateTime::Duration->new( months => 1 );	# no day: end of month
		}
		$dt -= DateTime::Duration->new( days => 1 );
	}

	# debug trace; month/day may be undef so stringify them safely
	my $mm_str = defined $mm ? $mm : '';
	my $dd_str = defined $dd ? $dd : '';
	print STDERR "mk_epoch_date($name,$suffix) [$yyyy-$mm_str-$dd_str] = " . $dt->ymd . " " . $dt->hms . "\n";

	return $dt->epoch || 'NULL';
}
push( @conditions, ' files.shareid = ' . $param->{'search_share'} ) if ($param->{'search_share'});
push (@conditions, " upper(files.path) LIKE upper('%".$param->{'search_filename'}."%')") if ($param->{'search_filename'});
- if ( $param->{burned} ) {
- my $is_what = 'is null';
- $is_what = '= 1' if ($param->{burned} eq 'burned');
- push @conditions, "archive_burned.part $is_what";
- push @conditions, "archive_burned.copy $is_what";
- }
+ push @conditions, join(' ' , 'burned is', $param->{burned} eq 'burned' ? '' : 'not', 'true') if $param->{burned};
return join(" and ", @conditions);
}
# do we have to add tables for burned media?
if ( $param->{burned} ) {
$sql_from .= qq{
- LEFT OUTER JOIN archive_backup on archive_backup.backup_id = backups.id
- LEFT OUTER JOIN archive_burned on archive_burned.archive_id = archive_backup.archive_id
+ LEFT OUTER JOIN backups_burned on backup_id = backups.id
};
}
my $order = getSort('search', 'sql', $param->{'sort'});
+ # XXX LIMIT $on_page doesn't work since we don't get correct number of results
my $sql_order = qq{
ORDER BY $order
- LIMIT $on_page
OFFSET ?
};
- my $sql_count = qq{ select count(files.id) $sql_from $sql_where };
my $sql_results = qq{ select $sql_cols $sql_from $sql_where $sql_order };
-
- my $sth = $dbh->prepare($sql_count);
- $sth->execute();
- my ($results) = $sth->fetchrow_array();
-
- $sth = $dbh->prepare($sql_results);
- $sth->execute( $offset );
-
- if ($sth->rows != $results) {
- my $bug = "$0 BUG: [[ $sql_count ]] = $results while [[ $sql_results ]] = " . $sth->rows;
- $bug =~ s/\s+/ /gs;
- print STDERR "$bug\n";
- }
+ my $sth = $dbh->prepare($sql_results);
+ my $rows = $sth->execute( $offset );
my @ret;
-
+
while (my $row = $sth->fetchrow_hashref()) {
push @ret, $row;
+ last if $#ret + 1 >= $on_page;
}
$sth->finish();
- return ($results, \@ret);
+ return ($rows, \@ret);
}
sub getFilesHyperEstraier($) {
);
}
# Return the sorted list of backup numbers for $host that still need
# archiving.  Candidates are seeded from the on-disk backups reported by
# BackupPC, then each database row decides whether the backup is kept:
# it must not be inc_deleted, must have a real size, and must not already
# have an increment size recorded (the filter the commented-out SQL
# "and inc_size < 0 and size > 0 and not inc_deleted" used to apply).
sub host_backup_nums {
	my $host = shift;

	my $sth = get_dbh->prepare(qq{
		select
			hosts.name as host, -- FIXME for debug
			backups.num as num,
			inc_size,
			size,
			inc_deleted
		from backups
		join hosts on hosts.id = hostid
		where hosts.name = ?
	});
	$sth->execute($host);
	# and inc_size < 0 and size > 0 and not inc_deleted

	# pre-seed with on-disk backups so every existing backup is a candidate
	my $all_backup_numbers = {};
	$all_backup_numbers->{ $_->{num} }++ foreach $bpc->BackupInfoRead($host);

	while ( my $row = $sth->fetchrow_hashref ) {
		warn "# row ", dump $row;
		# ternary table: first matching row decides keep (1) / drop (0)
		$all_backup_numbers->{ $row->{num} } =
			  $row->{inc_deleted}  ? 0	# increments already deleted
			: $row->{size} == 0    ? 0	# empty backup
			: $row->{inc_size} > 0 ? 0	# already archived
			: $row->{size} > 0     ? 1	# real data, not yet archived
			:                        0;
	}

	warn "# host $host all_backup_numbers = ", dump($all_backup_numbers);

	my @backup_nums =
		sort { $a <=> $b }
		grep { $all_backup_numbers->{$_} }
		keys %$all_backup_numbers;

	return @backup_nums;
}
+
sub getBackupsNotBurned($) {
my $param = shift;
my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
- my $max_archive_file_size = $Conf{ArchiveChunkSize} || die "no MaxFileInSize";
my $retHTML .= qq|
<form id="forma" method="POST" action="$MyURL?action=burn">
<script type="text/javascript">
var media_size = $max_archive_size ;
-var max_file_size = $max_archive_file_size;
</script>
|;
$sth_archived ||= $dbh->prepare(qq{
select
- dvd_nr, note,
+ archive.dvd_nr, note,
count(archive_burned.copy) as copies
from archive
inner join archive_burned on archive_burned.archive_id = archive.id
- inner join archive_backup on archive.id = archive_backup.archive_id
- inner join backups on backups.id = archive_backup.backup_id
+ inner join archive_backup_parts on archive.id = archive_backup_parts.archive_id
+ inner join backups on backups.id = archive_backup_parts.backup_id
inner join hosts on hosts.id = backups.hostid
inner join shares on shares.id = backups.shareid
where hosts.name = ? and shares.name = ? and backups.num = ?
- group by dvd_nr, note
+ group by archive.dvd_nr, note
});
my @mediums;