X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=blobdiff_plain;f=lib%2FBackupPC%2FSearch.pm;h=77a68d3fff1da521378fb9d976e5f7fa8649fc02;hp=479b51d70546c13f5eba2781b45c02f68ed2b7a4;hb=b34c67f0bd159364d2fada4930d53987e78047fc;hpb=aff621b091597104904eafcb4ad6d9fabc2bf7bb

diff --git a/lib/BackupPC/Search.pm b/lib/BackupPC/Search.pm
index 479b51d..77a68d3 100644
--- a/lib/BackupPC/Search.pm
+++ b/lib/BackupPC/Search.pm
@@ -12,6 +12,10 @@ use XML::Writer;
 use IO::File;
 use Data::Dump qw(dump);
 
+require Exporter;
+our @ISA=qw(Exporter);
+our @EXPORT=qw(unit);
+
 my $on_page = 100;
 my $pager_pages = 10;
 
@@ -370,35 +374,6 @@ sub getGzipSize($$)
 	);
 }
 
-sub getVolumes($) {
-	my $id = shift;
-
-	my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
-
-	my $sth = $dbh->prepare(qq{
-		select
-			size
-		from backup_parts
-		where backup_id = ?
-		order by part_nr asc
-	});
-
-	$sth->execute($id);
-
-	my $cumulative_size = 0;
-	my $volumes = 1;
-
-	while(my ($size) = $sth->fetchrow_array) {
-		if ($cumulative_size + $size > $max_archive_size) {
-			$volumes++;
-			$cumulative_size = $size;
-		} else {
-			$cumulative_size += $size;
-		}
-	}
-
-	return ($volumes,$cumulative_size);
-}
 
 sub getBackupsNotBurned($) {
 
@@ -410,36 +385,19 @@ sub getBackupsNotBurned($) {
 
 print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql order: $order\n";
 	my $sql = qq{
-	SELECT
-		backups.hostID AS hostID,
-		hosts.name AS host,
-		shares.name AS share,
-		backups.num AS backupnum,
-		backups.type AS type,
-		backups.date AS date,
-		date_part('epoch',now()) - backups.date as age,
-		backups.size AS size,
-		backups.id AS id,
-		backups.inc_size AS inc_size,
-		backups.parts AS parts
-	FROM backups
-	INNER JOIN shares ON backups.shareID=shares.ID
-	INNER JOIN hosts ON backups.hostID = hosts.ID
-	LEFT OUTER JOIN archive_backup ON archive_backup.backup_id = backups.id
-	WHERE backups.inc_size > 0 AND backups.size > 0 AND backups.inc_deleted is false AND archive_backup.backup_id IS NULL AND backups.parts > 0
-	GROUP BY
-		backups.hostID,
-		hosts.name,
-		shares.name,
-		backups.num,
-		backups.shareid,
-		backups.id,
-		backups.type,
-		backups.date,
-		backups.size,
-		backups.inc_size,
-		backups.parts
-	ORDER BY $order
+	SELECT
+		p.id,
+		p.filename,
+		b.date,
+		date_part('epoch',now()) - b.date as age,
+		p.size,
+		count(ab.date) as copies
+	FROM backup_parts p
+	JOIN backups b ON b.id = p.backup_id
+	LEFT OUTER JOIN archive_parts ap ON ap.backup_part_id = p.id
+	LEFT OUTER JOIN archive_burned ab ON ab.archive_id = ap.archive_id
+	GROUP BY p.id,filename,b.date,age,p.size,p.part_nr
+	ORDER BY b.date,p.part_nr
 	};
 	my $sth = $dbh->prepare( $sql );
 	my @ret;
@@ -448,17 +406,6 @@ print STDERR "## sort=". ($param->{'sort'} || 'no sort param') . " burn sql orde
 	while ( my $row = $sth->fetchrow_hashref() ) {
 		$row->{'age'} = sprintf("%0.1f", ( $row->{'age'} / 86400 ) );
 		#$row->{'age'} = sprintf("%0.1f", ( (time() - $row->{'date'}) / 86400 ) );
-
-		my $max_archive_size = $Conf{ArchiveMediaSize} || die "no ArchiveMediaSize";
-		if ($row->{size} > $max_archive_size) {
-			($row->{volumes}, $row->{inc_size_calc}) = getVolumes($row->{id});
-		}
-
-		$row->{size} = sprintf("%0.2f", $row->{size} / 1024 / 1024);
-
-		# do some cluster calculation (approximate)
-		$row->{inc_size} = int(( ($row->{inc_size} + 1023 ) / 2 ) * 2);
-		$row->{inc_size_calc} ||= $row->{inc_size};
 
 		push @ret, $row;
 	}
@@ -678,8 +625,8 @@ function update_size(name, checked, suma) {
 		suma -= size;
 	}
 
-	var volumes = parseInt( element_id("prt" + name).value);
-	debug('update_size('+name+','+checked+') suma: '+suma+' volumes: '+volumes);
+	debug('update_size('+name+','+checked+') suma: '+suma);
+/* FIXME
 	if (volumes > 1) {
 		if (checked) {
 			element_id("volumes").innerHTML = "This will take "+volumes+" mediums!";
@@ -691,7 +638,7 @@ function update_size(name, checked, suma) {
 			element_id("volumes").style.display = 'none';
 		}
 	}
-
+*/
 	return suma;
 }
 
@@ -821,59 +768,34 @@ EOF3
 	} .
-	sort_header($param, 'Share', 'share', 'center') .
-	sort_header($param, '#', 'num', 'center') .
-	qq{
-		Type
-	} .
+	sort_header($param, 'Filename', 'filename', 'left') .
 	sort_header($param, 'Date', 'date', 'center') .
 	sort_header($param, 'Age/days', 'age', 'center') .
-	sort_header($param, 'Size/Mb', 'size', 'center') .
-	sort_header($param, 'gzip size/Kb', 'incsize', 'center') .
+	sort_header($param, 'Size', 'size', 'center') .
 	qq{
-		medias
+		copies
 	};
 
 	my @color = (' bgcolor="#e0e0e0"', '');
 
-	my $i = 0;
-	my $host = '';
+	my $i = 1;
+#	my $img_url = $Conf{CgiImageDirURL};
 
 	foreach my $backup ( getBackupsNotBurned($param) ) {
 
-		if ($host ne $backup->{'host'}) {
-			$i++;
-			$host = $backup->{'host'};
-		}
-		my $ftype = "";
-
-		my $checkbox_key = $backup->{'hostid'}. '_' .$backup->{'backupnum'} . '_' . $backup->{'id'};
-
-		$retHTML .=
-			'
-			';
-
-		if (($backup->{'inc_size'} || 0) > 0) {
-			$retHTML .= '
-			';
-		}
-
-		my $img_url = $Conf{CgiImageDirURL};
-
-		$retHTML .=
-			'' .
-			'' . $backup->{'host'} . ':' . $backup->{'share'} . '' .
-			'' . $backup->{'backupnum'} . '' .
-			'' . $backup->{'type'} . '' .
-			'' . epoch_to_iso( $backup->{'date'} ) . '' .
-			'' . $backup->{'age'} . '' .
-			'' . $backup->{'size'} . '' .
-			'' . sprintf("%0.1f", $backup->{'inc_size'} / 1024 ) .
-			'' .
-			'' .
-			'' . ( qq{media} x $backup->{volumes} ) . '' .
-			"\n";
+		$retHTML .= join(''
+			,''
+			,''
+			,''
+			,''
+			,''
+			,'', $backup->{'filename'}, ''
+			,'', epoch_to_iso( $backup->{'date'} ), ''
+			,'', $backup->{'age'}, ''
+			,'', unit($backup->{'size'}), ''
+			,'', '*' x $backup->{copies}, ''
+			,"\n"
+		);
 	}
 
 	$retHTML .= "";
@@ -1118,4 +1040,24 @@ sub displayGrid($) {
 	return $retHTML;
 }
 
+my @units = qw/b k M G/;
+sub unit {
+	my $v = shift;
+
+	my $o = 0;
+
+	while ( ( $v / 10000 ) >= 1 ) {
+		$o++;
+		$v /= 1024;
+	}
+
+	if ( $v >= 1 ) {
+		return sprintf("%d%s", $v, $units[$o]);
+	} elsif ( $v == 0 ) {
+		return 0;
+	} else {
+		return sprintf("%.1f%s", $v, $units[$o]);
+	}
+}
+
 1;