my $in_backup_increment;
-if ( !getopts("th:n:p:r:s:b:w:vd", \%opts) ) {
+if ( !getopts("th:n:p:r:s:b:w:vdf", \%opts) ) {
print STDERR <<EOF;
usage: $0 [options]
Required options:
-p pathAdd new path prefix
-b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar)
-w writeBufSz write buffer size (default 1048576 = 1MB)
+ -f overwrite existing parts
-v verbose output
-d debug output
EOF
}
if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) {
- print(STDERR "$0: bad host name '$opts{h}'\n");
- exit(1);
+ die "$0: bad host name '$opts{h}'\n";
}
my $Host = $opts{h};
if ( $opts{n} !~ /^(-?\d+)$/ ) {
- print(STDERR "$0: bad dump number '$opts{n}'\n");
- exit(1);
+ die "$0: bad dump number '$opts{n}'\n";
}
my $Num = $opts{n};
my $SpecialCnt = 0;
my $ErrorCnt = 0;
my $current_tar_size = 0;
+my $total_increment_size = 0;
my $i;
$Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
last if ( $Backups[$i]{num} == $Num );
}
if ( $i >= @Backups ) {
- print(STDERR "$0: bad backup number $Num for host $Host\n");
- exit(1);
+ die "$0: bad backup number $Num for host $Host\n";
}
my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
my $PathAdd = $1 if ( $opts{p} =~ /(.+)/ );
if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) {
- print(STDERR "$0: bad share name '$opts{s}'\n");
- exit(1);
+ die "$0: bad share name '$opts{s}'\n";
}
our $ShareName = $opts{s};
our $view = BackupPC::View->new($bpc, $Host, \@Backups);
inc_size = ?,
parts = ?,
inc_deleted = false
- where id = ? });
+ where id = ?
+});
my $sth_backup_parts = $dbh->prepare(qq{
insert into backup_parts (
backup_id,
#
my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parametar";
-$max_file_size *= 1024;
my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir};
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir);
my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)";
-my $tar_path = $tar_dir . '/' . $tar_file . '.tmp';
+my $tar_path_final = $tar_dir . '/' . $tar_file;
+my $tar_path = $tar_path_final . '.tmp';
+
$tar_path =~ s#//#/#g;
my $sth = $dbh->prepare(qq{
my ($backup_id) = $sth->fetchrow_array;
$sth->finish;
+
+# delete existing backup_parts
+my $sth_delete_backup_parts = $dbh->prepare(qq{
+ delete from backup_parts
+ where backup_id = ?
+});
+$sth_delete_backup_parts->execute($backup_id);
+
+
print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d});
+if (-e $tar_path_final) {
+ if ($opts{f}) {
+ rmtree($tar_path_final) or die "can't remove $tar_path_final: $!";
+ } else {
+ die "$tar_path_final already exists\n";
+ }
+}
my $fh;
my $part = 0;
my $items_in_part = 0;
sub new_tar_part {
+ my $arg = {@_};
+
if ($fh) {
return if ($current_tar_size == 0);
- print STDERR "# closing part $part\n" if ($opts{d});
+ print STDERR "\n\t+ $part:";
- # finish tar archive
+ #
+ # Finish with two null 512 byte headers,
+ # and then round out a full block.
+ #
my $data = "\0" x ($tar_header_length * 2);
TarWrite($fh, \$data);
TarWrite($fh, undef);
my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";
+ print "$file, $size bytes, $items_in_part items";
+
$sth_backup_parts->execute(
$backup_id,
$part,
$items_in_part,
);
+ $total_increment_size += $size;
+
+ if ($arg->{close}) {
+
+ sub move($$) {
+ my ($from,$to) = @_;
+ print STDERR "# rename $from -> $to\n" if ($opts{d});
+ rename($from, $to) or die "can't move $from -> $to: $!\n";
+ }
+
+ if ($part == 1) {
+ print STDERR " single" if ($opts{v});
+ move("${tar_path}/1.tar.gz", "${tar_path_final}.tar.gz");
+ move("${tar_path}/1.md5", "${tar_path_final}.md5");
+ rmtree $tar_path or die "can't remove temporary dir $tar_path: $!";
+ } else {
+ print STDERR " [last]" if ($opts{v});
+ move("${tar_path}", "${tar_path_final}");
+
+ # if this archive was single part, remove it
+ foreach my $suffix (qw/.tar.gz .md5/) {
+ my $path = $tar_path_final . $suffix;
+ unlink $path if (-e $path);
+ }
+ }
+
+ $sth_inc_size->execute(
+ $total_increment_size,
+ $part,
+ $backup_id
+ );
+
+ print "\n\ttotal $total_increment_size bytes";
+
+ return;
+ }
+
}
$part++;
# if this is first part, create directory
if ($part == 1) {
- if (-d $tar_path) {
+ if (-e $tar_path) {
print STDERR "# deleting existing $tar_path\n" if ($opts{d});
rmtree($tar_path);
}
mkdir($tar_path) || die "can't create directory $tar_path: $!";
+
+ sub abort_cleanup {
+ print STDERR "ABORTED: cleanup temp dir";
+ rmtree($tar_path);
+ $dbh->rollback;
+ exit 1;
+ }
+
+ $SIG{'INT'} = \&abort_cleanup;
+ $SIG{'QUIT'} = \&abort_cleanup;
+ $SIG{'__DIE__'} = \&abort_cleanup;
+
}
my $file = $tar_path . '/' . $part;
if (seedCache($Host, $ShareName, $Num)) {
archiveWrite($fh, '/');
archiveWriteHardLinks($fh);
+ new_tar_part( close => 1 );
} else {
print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});
- $no_files = 1;
-}
-
-#
-# Finish with two null 512 byte headers, and then round out a full
-# block.
-#
-my $data = "\0" x ($tar_header_length * 2);
-TarWrite($fh, \$data);
-TarWrite($fh, undef);
-
-if (! close($fh)) {
+ # remove temporary files if there are no files
rmtree($tar_path);
- die "can't close archive\n";
-}
-
-# remove temporary files if there are no files
-if ($no_files) {
- rmtree($tar_path);
-} elsif ($part == 1) {
- warn "FIXME: if there is only one part move to parent directory and rename";
}
#
# Got errors, with no files or directories; exit with non-zero
# status
#
- cleanup();
- exit(1);
+ die "got errors or no files\n";
}
$sth_inc_size->finish;
$dbh->commit || die "can't commit changes to database";
$dbh->disconnect();
-exit(0);
+exit;
###########################################################################
# Subroutines
my $done = $WriteBufSz - length($WriteBuf);
if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
!= $WriteBufSz ) {
- print(STDERR "Unable to write to output file ($!)\n");
- exit(1);
+ die "Unable to write to output file ($!)\n";
}
while ( $done + $WriteBufSz <= length($$dataRef) ) {
if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
!= $WriteBufSz ) {
- print(STDERR "Unable to write to output file ($!)\n");
- exit(1);
+ die "Unable to write to output file ($!)\n";
}
$done += $WriteBufSz;
}
sub seedCache($$$) {
my ($host, $share, $dumpNo) = @_;
- print STDERR curr_time(), "getting files for $host:$share increment $dumpNo..." if ($opts{v});
+ print STDERR curr_time(), "$host:$share #$dumpNo" if ($opts{v});
my $sql = q{
SELECT path,size
FROM files
my $sth = $dbh->prepare($sql);
$sth->execute($host, $share, $dumpNo);
my $count = $sth->rows;
- print STDERR " found $count items\n" if ($opts{v});
+ print STDERR " $count items, parts:" if ($opts{v});
while (my $row = $sth->fetchrow_arrayref) {
#print STDERR "+ ", $row->[0],"\n";
$in_backup_increment->{ $row->[0] } = $row->[1];
# is this file too large to fit into MaxArchiveFileSize?
if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) {
- print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d});
+ print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d});
new_tar_part();
}