#!/usr/local/bin/perl -w

use strict;
use lib "/usr/local/BackupPC/lib";

use DBI;
use BackupPC::Lib;
use BackupPC::View;
use BackupPC::Attrib qw/:all/;
use Data::Dump qw(dump);
use Time::HiRes qw/time/;
use POSIX qw/strftime/;
use Cwd qw/abs_path/;
use Archive::Tar::Streamed;
use Algorithm::Diff;
use File::Slurp;
use Getopt::Long::Descriptive;

=head1 NAME

BackupPC_ASA_PostArchive_Update

=head1 DESCRIPTION

Update the BackupPC search database with the state of archive parts written
to the archive host. Configuration is read from the C<_search_archive> host
configuration:

    # /etc/BackupPC/pc/_search_archive.pl

=cut
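# Assumed hookup (not part of this script): BackupPC would normally invoke
# this tool from ArchivePostUserCmd on the archive host, roughly as sketched
# below; the install path and exact variable list depend on the local setup.
#
#   $Conf{ArchivePostUserCmd} = '/usr/local/bin/BackupPC_ASA_PostArchive_Update'
#       . ' --host $HostList --num $BackupList --ok $xferOK';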
my $bpc = BackupPC::Lib->new || die "can't create BackupPC::Lib";
$bpc->ConfigRead('_search_archive'); # read our configuration
my %Conf = $bpc->Conf();

use BackupPC::Search;
%BackupPC::Search::Conf = %Conf;

my ($opt,$usage) = describe_options(
    "%c %o",
    [ 'host|h=s@',  "import just host(s)" ],
    [ 'num|n=s@',   "import just backup number(s)" ],
    [ 'ok=n',       "xferOK", { default => 0 } ],
    [ 'check|c',    "check archives on disk and sync", { default => 1 } ],
    [ 'debug|d',    "debug", { default => 1 } ],
    [ 'help',       "show help" ],
);

print($usage->text), exit if $opt->help;

$|=1;

my $start_t = time();

my $t_fmt = '%Y-%m-%d %H:%M:%S';

#warn "## Conf = ",dump( \%Conf );

my $dbh = DBI->connect($Conf{SearchDSN}, $Conf{SearchUser}, "",
    { RaiseError => 1, AutoCommit => 0 });

#---- subs ----

sub curr_time {
    return strftime($t_fmt, localtime());
}

sub fmt_time {
    my $t = shift || return;
    my $out = "";
    my ($ss,$mm,$hh) = gmtime($t);
    $out .= "${hh}h" if ($hh);
    $out .= sprintf("%02d:%02d", $mm, $ss);
    return $out;
}

my $hsn_cache;

sub get_backup_id($$) {
    my ($host, $num) = @_;

    my $key = "$host $num";
    return $hsn_cache->{$key} if $hsn_cache->{$key};

    # all backup parts will be attached to the first share in backups
    my $sth = $dbh->prepare(qq{
        SELECT min(backups.id)
        FROM backups
        INNER JOIN shares ON backups.shareID = shares.ID
        INNER JOIN hosts  ON backups.hostID  = hosts.ID
        WHERE hosts.name = ? and backups.num = ?
    });
    $sth->execute($host, $num);
    die "can't find backup $host:$num" unless $sth->rows == 1;
    my ($id) = $sth->fetchrow_array;

    $hsn_cache->{$key} = $id;

    print STDERR "# $key == $id\n" if $opt->debug;

    return $id;
}

sub backup_inc_deleted($) {
    my $backup_id = shift;
    my $sth_inc_deleted = $dbh->prepare(qq{
        update backups set
            inc_deleted = true
        where id = ?
    });
    $sth_inc_deleted->execute($backup_id);
}

sub system_ok {
    warn "## system_ok @_\n";
    # $? carries the child exit status; $! is only set if system itself failed
    system(@_) == 0 or die "system @_: $?";
}

my $sth_inc_size = $dbh->prepare(qq{
    update backups set
        inc_size = ?,
        parts = ?,
        inc_deleted = false
    where id = ?
});

sub read_only {
    my $full = shift;
    my $perm = (stat $full)[2] & 0444; # keep only the read bits
    warn sprintf("chmod %03o %s\n", $perm, $full);
    # low-precedence "or" so the die actually fires on chmod failure
    chmod($perm, $full) or die "chmod $full: $!";
}

sub check_archive {
    my ($host,$num) = @_;
    warn "# check_archive $host $num";
    my $t = time();

    my $glob = "$Conf{ArchiveDest}/$host.$num.*";

    my @tar_parts = sort map { s/^\Q$Conf{ArchiveDest}\E\/*//; $_ } glob $glob;

    if ( ! @tar_parts ) {
        warn "ERROR: no files for $glob";
        return;
    }

    print curr_time, " check $host $num\n";

    my $sth = $dbh->prepare(qq{
        SELECT count(*)
        FROM files
        JOIN shares on shares.id = shareid
        JOIN hosts  on hosts.id  = shares.hostid
        WHERE hosts.name = ? and backupnum = ?
    });
    $sth->execute($host, $num);
    my ($files) = $sth->fetchrow_array;

    if ( $files == 0 ) {
        warn "EMPTY INCREMENT, cleanup ", dump( @tar_parts );
        foreach my $path ( @tar_parts ) {
            my $full = "$Conf{ArchiveDest}/$path";
            warn "rm $full\n";
            unlink($full) or die "can't remove $full: $!";
        }
        return;
    }

    my $md5_path = "$Conf{ArchiveDest}/$host.$num.md5";
    unlink $md5_path if -e $md5_path && -s $md5_path == 0; # fix empty

    my $read_protect = 0;

    if ( ! -e $md5_path ) {
        system_ok "cd $Conf{ArchiveDest} && /usr/bin/md5sum $host.$num.* > $md5_path";
        read_only $md5_path;
        $read_protect = 1;
    } else {
        system_ok "cd $Conf{ArchiveDest} && /usr/bin/md5sum -c $md5_path" if $opt->check;
    }

    my $md5sum;
    foreach ( split(/\n/, read_file($md5_path)) ) {
        my ( $md5, $path ) = split(/\s+/, $_);
        $md5sum->{$path} = $md5;
        read_only "$Conf{ArchiveDest}/$path" if $read_protect;
    }

    # depending on the expected return value this is used like:
    # my $uncompress_size = get_gzip_size('/full/path/to.gz');
    # my ($compress_size, $uncompress_size) = get_gzip_size('/path.gz');
    sub get_gzip_size($) {
        my $filename = shift;
        die "file $filename problem: $!" unless -r $filename;

        # not compressed? on-disk size is the tar size
        if ( $filename !~ m/\.gz$/ ) {
            return -s $filename;
        }

        open(my $gzip, $Conf{GzipPath}." -l $filename |")
            or die "can't gzip -l $filename: $!";
        local $/ = undef;
        my $line = <$gzip>;
        close($gzip);

        if ($line =~ m/\s+(\d+)\s+(\d+)\s+\d+\.\d+/s) {
            # return a list, as the usage comment above promises
            return wantarray ? ( $1, $2 ) : $2;
        } else {
            warn "ERROR can't parse: $line";
            return -s $filename;
        }
    }
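    # For reference, the regex above picks the two numeric columns out of
    # `gzip -l` output, which looks like this (sizes are illustrative):
    #
    #            compressed        uncompressed  ratio uncompressed_name
    #                   339                1024  66.9% localhost.0.0.tar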
    sub check_part {
        my ($host, $num, $part_nr, $tar_size, $size, $md5, $items, $filename) = @_;
        my $backup_id = get_backup_id($host, $num);
        my $sth_md5 = $dbh->prepare(qq{
            select
                id, tar_size, size, md5, items, filename
            from backup_parts
            where backup_id = ? and part_nr = ? and filename = ?
        });

        $sth_md5->execute($backup_id, $part_nr, $filename);

        if (my $row = $sth_md5->fetchrow_hashref) {
            return if (
                $row->{tar_size} >= $tar_size
                && $row->{size}  == $size
                && $row->{md5}   eq $md5
                && $row->{items} == $items
            );
            print ", deleting invalid backup_parts $row->{id}";
            $dbh->do('delete from backup_parts where id = ?', undef, $row->{id});
        }

        print ", inserting new";
        my $sth_insert = $dbh->prepare(qq{
            insert into backup_parts (
                backup_id,
                part_nr,
                tar_size,
                size,
                md5,
                items,
                filename
            ) values (?,?,?,?,?,?,?)
        });
        $sth_insert->execute($backup_id, $part_nr, $tar_size, $size, $md5, $items, $filename);
    }

    print " [parts: ", join(", ", @tar_parts), "]" if $opt->debug;

    my @tar_files;

    my $backup_part;

    print " reading" if $opt->debug;

    my $part_nr  = 0;
    my $inc_size = 0;

    foreach my $filename (@tar_parts) {

        next if $filename eq "$host.$num.md5";

        print "\n\t- $filename";

        my $path = "$Conf{ArchiveDest}/$filename";
        $path =~ s{//+}{/}g;

        my $size = (stat( $path ))[7] || die "can't stat $path: $!";

        if ($size > $Conf{ArchiveMediaSize}) {
            print ", part bigger than media $size > $Conf{ArchiveMediaSize}\n";
            return 0;
        }

        print ", $size bytes";

=for later
        open(my $fh, "gzip -cd $path |") or die "can't open $path: $!";
        binmode($fh);
        my $tar = Archive::Tar::Streamed->new($fh);

        my $tar_size_inarc = 0;
        my $items = 0;

        while (my $entry = $tar->next) {
            push @tar_files, $entry->name;
            $items++;
            $tar_size_inarc += $entry->size;

            if ($tar_size_inarc > $Conf{ArchiveMediaSize}) {
                print ", part $filename is too big $tar_size_inarc > $Conf{ArchiveMediaSize}\n";
                return 0;
            }
        }

        close($fh);

        print ", $items items";

        if ($tar_size_inarc == 0 && $items == 0) {
            print ", EMPTY tar\n";

            my $backup_id = get_backup_id($host, $num);
            backup_inc_deleted( $backup_id );

            $dbh->commit;

            return 1;
        }
=cut

        # FIXME
        my $tar_size = get_gzip_size( $path );

        #
        # finally, check if backup_parts table in database is valid
        #

        my $md5 = $md5sum->{$filename} || die "no md5sum for $filename in ", dump($md5sum);
        my $items = 1;
        $part_nr++;

        check_part($host, $num, $part_nr, $tar_size, $size, $md5, $items, $filename);

        # round increment size up to the next 2k block
        # (e.g. 1 byte counts as 2048, 2049 bytes as 4096)
        $inc_size += int( ($size + 2047) / 2048 ) * 2048;
    }

    $sth_inc_size->execute(
        $inc_size,
        $part_nr,
        get_backup_id($host, $num),
    );
    warn "## commit\n";
    $dbh->commit;

    return;

=for removed
    @tar_files = sort @tar_files;
    print "\n\t", ($#tar_files + 1), " tar files";

    my $sth = $dbh->prepare(qq{
        SELECT path,type
        FROM files
        JOIN shares on shares.id = shareid
        JOIN hosts  on hosts.id  = shares.hostid
        WHERE hosts.name = ? and backupnum = ?
    });
    $sth->execute($host, $num);
    my @db_files;
    while ( my $row = $sth->fetchrow_hashref ) {
        my $path = $row->{'path'} || die "no path?";
        $path =~ s#^/#./#;
        $path .= '/' if $row->{'type'} == BPC_FTYPE_DIR;
        push @db_files, $path;
    }

    print " ", ($#db_files + 1), " database files, diff";

    @db_files = sort @db_files;

    my $same = 1;
    if ($#tar_files != $#db_files) {
        $same = 0;
        print " NUMBER";
    } else {
        my $diff = Algorithm::Diff->new(\@tar_files, \@db_files);
        while ( $diff->Next() ) {
            next if $diff->Same();
            $same = 0;
            print "< $_\n" for $diff->Items(1);
            print "> $_\n" for $diff->Items(2);
        }
    }

    print " ", ($same ? 'ok' : 'DIFFERENT'),
        ", dur: ", fmt_time(time() - $t), "\n";

    $dbh->commit;

    return $same;
=cut

}
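# A minimal by-hand check of a single archived backup, e.g. from the Perl
# debugger (host name and backup number below are hypothetical):
#
#   check_archive( 'localhost', 42 );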
#----- main

exit unless $opt->host;

foreach ( 0 .. $#{ $opt->host } ) {

    my $host = lc $opt->host->[$_];
    my $num  = $opt->num->[$_];

    if ( ! $opt->ok ) {
        warn "ERROR $host $num running cleanup";
        foreach my $path ( glob "$Conf{ArchiveDest}/$host.$num.*" ) {
            warn "# rm $path";
            unlink($path) or die "can't remove $path: $!";
        }
    } else {
        check_archive $host => $num;
    }

}

exit;

# FIXME -- everything below is unreachable (kept for reference): it would
# re-check all archived backups recorded in the database, but tar_check()
# is not defined in this script.
my $sth = $dbh->prepare( qq{
    select
        backups.id as backup_id,
        hosts.name as host,
        shares.name as share,
        backups.num as num,
        backups.date,
        inc_size,
        parts,
        count(backup_parts.backup_id) as backup_parts
    from backups
        join shares on backups.hostid = shares.hostid
            and shares.id = backups.shareid
        join hosts on shares.hostid = hosts.id
        full outer join backup_parts on backups.id = backup_parts.backup_id
    where not inc_deleted and backups.size > 0
    group by backups.id, hosts.name, shares.name, backups.num, backups.date,
        inc_size, parts, backup_parts.backup_id
    order by backups.date
} );

$sth->execute();
my $num_backups = $sth->rows;
my $curr_backup = 1;

while (my $row = $sth->fetchrow_hashref) {

    $curr_backup++;

    my $tar_file = BackupPC::Search::getGzipName($row->{'host'}, $row->{'share'}, $row->{'num'});

    # this will return -1 if file doesn't exist
    my $size = BackupPC::Search::get_tgz_size_by_name($tar_file);

    print "# host: $row->{host}, share: $row->{share}, backup_num: $row->{num} size: $size backup.size: ", $row->{inc_size}, "\n" if $opt->debug;

    if ( $row->{'inc_size'} != -1 && $size != -1 && $row->{'inc_size'} >= $size
        && $row->{parts} == $row->{backup_parts} ) {
        if ($opt->check) {
            tar_check($row->{'host'}, $row->{'share'}, $row->{'num'}, $tar_file) && next;
        } else {
            next;
        }
    }

    print curr_time, " creating $curr_backup/$num_backups ",
        $row->{host}, ":", $row->{share}, " #", $row->{num},
        " ", strftime('%Y-%m-%d', localtime($row->{date})),
        " -> $tar_file";

    my $t = time();

=for later
    # re-create archive?
    my $cmd = qq[ $tarIncCreate -h "$row->{host}" -s "$row->{share}" -n $row->{num} -f ];
    print STDERR "## $cmd\n" if $opt->debug;

    if (system($cmd) != 0) {
        print STDERR " FAILED, marking this backup deleted";
        backup_inc_deleted( $row->{backup_id} );
    }
=cut

    print ", dur: ", fmt_time(time() - $t), "\n";
}

undef $sth;
$dbh->disconnect;
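# Example manual run of this script (hypothetical host and backup number):
#
#   BackupPC_ASA_PostArchive_Update --host localhost --num 42 --ok 1
#
# Without --ok 1 the files matching that host/number in $Conf{ArchiveDest}
# are treated as a failed transfer and removed instead of being checked.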