my $in_backup_increment;
-if ( !getopts("th:n:p:r:s:b:w:vd", \%opts) ) {
+if ( !getopts("th:n:p:r:s:b:w:vdf", \%opts) ) {
print STDERR <<EOF;
usage: $0 [options]
Required options:
-p pathAdd new path prefix
-b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar)
-w writeBufSz write buffer size (default 1048576 = 1MB)
+ -f overwrite existing parts
-v verbose output
-d debug output
EOF
inc_size = ?,
parts = ?,
inc_deleted = false
- where id = ? });
+ where id = ?
+});
my $sth_backup_parts = $dbh->prepare(qq{
insert into backup_parts (
backup_id,
#
my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parametar";
-$max_file_size *= 1024;
my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir};
die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir);
my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)";
-my $tar_path = $tar_dir . '/' . $tar_file . '.tmp';
+my $tar_path_final = $tar_dir . '/' . $tar_file;
+my $tar_path = $tar_path_final . '.tmp';
+
$tar_path =~ s#//#/#g;
my $sth = $dbh->prepare(qq{
my ($backup_id) = $sth->fetchrow_array;
$sth->finish;
+
+# delete existing backup_parts for this backup before re-creating them
+my $sth_delete_backup_parts = $dbh->prepare(qq{
+ delete from backup_parts
+ where backup_id = ?
+});
+$sth_delete_backup_parts->execute($backup_id);
+
+
print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d});
+# Refuse to clobber an existing final archive unless -f (force) was given.
+if (-e $tar_path_final) {
+	if ($opts{f}) {
+		# Use parens + low-precedence "or": the original
+		# "rmtree $tar_path_final || die" bound || to the path
+		# (always true), so the die could never fire.
+		rmtree($tar_path_final) or die "can't remove $tar_path_final: $!";
+	} else {
+		die "$tar_path_final already exists\n";
+	}
+}
my $fh;
my $part = 0;
if ($fh) {
return if ($current_tar_size == 0);
- print STDERR "# closing part $part\n" if ($opts{d});
+ print STDERR "\n\t+ $part:";
#
# Finish with two null 512 byte headers,
my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";
+ print "$file, $size bytes, $items_in_part items";
+
$sth_backup_parts->execute(
$backup_id,
$part,
$items_in_part,
);
- $total_increment_size += int( ( $size + 1023 ) / 1024 ) * 1024;
+ $total_increment_size += $size;
if ($arg->{close}) {
- print STDERR "# close last part\n" if ($opts{d});
+
+	# move($from, $to) — rename a file, dying loudly on failure.
+	# Prototype ($$) dropped: it does not validate arguments and all
+	# call sites use parens, so it had no effect anyway.
+	sub move {
+		my ($from, $to) = @_;
+		print STDERR "# rename $from -> $to\n" if ($opts{d});
+		# Parens + "or" are required here: "rename $from, $to || die"
+		# binds || to $to (always true), silently ignoring failures.
+		rename($from, $to) or die "can't move $from -> $to: $!\n";
+	}
+
+ if ($part == 1) {
+ print STDERR " single" if ($opts{v});
+ move("${tar_path}/1.tar.gz", "${tar_path_final}.tar.gz");
+ move("${tar_path}/1.md5", "${tar_path_final}.md5");
+ rmtree $tar_path or die "can't remove temporary dir $tar_path: $!";
+ } else {
+ print STDERR " [last]" if ($opts{v});
+ move("${tar_path}", "${tar_path_final}");
+
+	# if a previous run left a single-part archive behind, remove its files
+ foreach my $suffix (qw/.tar.gz .md5/) {
+ my $path = $tar_path_final . $suffix;
+ unlink $path if (-e $path);
+ }
+ }
+
$sth_inc_size->execute(
$total_increment_size,
$part,
$backup_id
);
+
+ print "\n\ttotal $total_increment_size bytes";
+
return;
}
# if this is first part, create directory
if ($part == 1) {
- if (-d $tar_path) {
+ if (-e $tar_path) {
print STDERR "# deleting existing $tar_path\n" if ($opts{d});
rmtree($tar_path);
}
mkdir($tar_path) || die "can't create directory $tar_path: $!";
sub abort_cleanup {
- print STDERR "ABORTED: cleanup temp dir";
+ print STDERR "ABORTED: cleanup temp dir ";
rmtree($tar_path);
$dbh->rollback;
exit 1;
if (seedCache($Host, $ShareName, $Num)) {
archiveWrite($fh, '/');
archiveWriteHardLinks($fh);
+ new_tar_part( close => 1 );
} else {
print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});
- $no_files = 1;
-}
-
-new_tar_part( close => 1 );
-
-# remove temporary files if there are no files
-if ($no_files) {
+ # remove temporary files if there are no files
rmtree($tar_path);
-} elsif ($part == 1) {
- warn "FIXME: if there is only one part move to parent directory and rename";
}
#
sub seedCache($$$) {
my ($host, $share, $dumpNo) = @_;
- print STDERR curr_time(), "getting files for $host:$share increment $dumpNo..." if ($opts{v});
+ print STDERR curr_time(), "$host:$share #$dumpNo" if ($opts{v});
my $sql = q{
SELECT path,size
FROM files
my $sth = $dbh->prepare($sql);
$sth->execute($host, $share, $dumpNo);
my $count = $sth->rows;
- print STDERR " found $count items\n" if ($opts{v});
+ print STDERR " $count items, parts:" if ($opts{v});
while (my $row = $sth->fetchrow_arrayref) {
#print STDERR "+ ", $row->[0],"\n";
$in_backup_increment->{ $row->[0] } = $row->[1];
# is this file too large to fit into MaxArchiveFileSize?
if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) {
- print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d});
+ print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d});
new_tar_part();
}