X-Git-Url: http://git.rot13.org/?p=BackupPC.git;a=blobdiff_plain;f=bin%2FBackupPC_ASA_PostArchive_Update;h=757c93d8b33cee48ccba3af6c197d12d031c4b0c;hp=3d531eeb16994977c896e594324baa1505feb3d2;hb=c615e1bfc6f2b0604a2658996f5dd6b6cfa9469c;hpb=cc890fbe5f86ff57159bf2fbdabc52e163a729f5 diff --git a/bin/BackupPC_ASA_PostArchive_Update b/bin/BackupPC_ASA_PostArchive_Update index 3d531ee..757c93d 100755 --- a/bin/BackupPC_ASA_PostArchive_Update +++ b/bin/BackupPC_ASA_PostArchive_Update @@ -38,6 +38,7 @@ my ($opt,$usage) = describe_options( "%c %o", [ 'host|h=s@', "import just host(s)" ], [ 'num|n=s@', "import just backup number(s)" ], +[ 'ok=n', "xferOK", { default => 0 } ], [ 'check|c', "check archives on disk and sync", { default => 1 } ], [ 'debug|d', "debug", { default => 1 } ], [ 'help', "show help" ], @@ -79,20 +80,22 @@ sub get_backup_id($$) { my $key = "$host $num"; return $hsn_cache->{$key} if ($hsn_cache->{$key}); + # all backup parts will be attached to first share in backups my $sth = $dbh->prepare(qq{ SELECT - backups.id + min(backups.id) FROM backups INNER JOIN shares ON backups.shareID=shares.ID INNER JOIN hosts ON backups.hostID = hosts.ID WHERE hosts.name = ? and backups.num = ? }); $sth->execute($host, $num); + die "can't find backup $host:$num" unless $sth->rows == 1; my ($id) = $sth->fetchrow_array; - $hsn_cache->{"$host $num"} = $id; + $hsn_cache->{$key} = $id; - print STDERR "# $host $num == $id\n" if $opt->debug; + print STDERR "# $key == $id\n" if $opt->debug; return $id; } @@ -120,6 +123,13 @@ my $sth_inc_size = $dbh->prepare(qq{ where id = ? 
# Strip write (and execute) permission from a finished archive part,
# keeping only whichever read bits were already set (e.g. 0644 -> 0444),
# so completed parts can't be accidentally modified.
sub read_only {
	my $full = shift;

	# keep only the read bits of the current mode
	my $perm = (stat $full)[2] & 0444;
	warn sprintf("chmod %03o %s\n",$perm,$full);

	# BUGFIX: original was  chmod $perm, $full || die $!;
	# `||` binds to $full (always a true string), so die was unreachable
	# and chmod failures were silently ignored. Low-precedence `or`
	# applies to the whole chmod call.
	chmod $perm, $full or die $!;
}
-e $md5_path ) { system_ok "cd $Conf{ArchiveDest} && /usr/bin/md5sum $host.$num.* > $md5_path"; + read_only $md5_path; + $read_protect = 1; } else { system_ok "cd $Conf{ArchiveDest} && /usr/bin/md5sum -c $md5_path" if $opt->check; } @@ -150,6 +184,7 @@ sub check_archive { foreach ( split(/\n/, read_file "$Conf{ArchiveDest}/$host.$num.md5" ) ) { my ( $md5, $path ) = split(/\s+/,$_); $md5sum->{$path} = $md5; + read_only "$Conf{ArchiveDest}/$path" if $read_protect; } # depending on expected returned value this is used like: @@ -183,16 +218,16 @@ sub check_archive { } sub check_part { - my ($host, $num, $part_nr, $tar_size, $size, $md5, $items) = @_; + my ($host, $num, $part_nr, $tar_size, $size, $md5, $items, $filename) = @_; my $backup_id = get_backup_id($host, $num); my $sth_md5 = $dbh->prepare(qq{ select - id, tar_size, size, md5, items + id, tar_size, size, md5, items, filename from backup_parts - where backup_id = ? and part_nr = ? + where backup_id = ? and part_nr = ? and filename = ? }); - $sth_md5->execute($backup_id, $part_nr); + $sth_md5->execute($backup_id, $part_nr, $filename); if (my $row = $sth_md5->fetchrow_hashref) { return if ( @@ -212,12 +247,12 @@ sub check_archive { tar_size, size, md5, - items - ) values (?,?,?,?,?,?) + items, + filename + ) values (?,?,?,?,?,?,?) 
#----- main

# nothing to do when no hosts were passed on the command line
exit unless $opt->host;

# --host and --num are parallel arrays: host[i] pairs with num[i]
foreach ( 0 .. $#{ $opt->host } ) {
	my $host = lc $opt->host->[$_];
	my $num  = $opt->num->[$_];

	if ( ! $opt->ok ) {
		# transfer reported failure (--ok 0): remove any partial
		# archive parts left on disk for this host/backup number
		warn "ERROR $host $num running cleanup";
		foreach my $path ( glob "$Conf{ArchiveDest}/$host.$num.*" ) {
			warn "# rm $path";
			# BUGFIX: original was  unlink $path || die $!;
			# `||` binds to $path (always true), so die was
			# unreachable and unlink failures were silently
			# ignored; `or` checks unlink's return value.
			unlink $path or die $!;
		}
	} else {
		check_archive $host => $num;
	}
}