2 #============================================================= -*-perl-*-
4 # BackupPC_tarIncCreate: create a tar archive of an existing incremental dump
9 # Usage: BackupPC_tarIncCreate [options]
14 # -h host Host from which the tar archive is created.
15 # -n dumpNum Dump number from which the tar archive is created.
16 # A negative number means relative to the end (eg -1
17 # means the most recent dump, -2 2nd most recent etc).
18 # -s shareName Share name from which the tar archive is created.
21 # -t print summary totals
22 # -r pathRemove path prefix that will be replaced with pathAdd
23 # -p pathAdd new path prefix
24 # -b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar)
25 # -w writeBufSz write buffer size (default 1MB)
27 # The -h, -n and -s options specify which dump is used to generate
28 # the tar archive. The -r and -p options can be used to relocate
29 # the paths in the tar archive so extracted files can be placed
30 # in a location different from their original location.
33 # Craig Barratt <cbarratt@users.sourceforge.net>
34 # Ivan Klaric <iklaric@gmail.com>
35 # Dobrica Pavlinusic <dpavlin@rot13.org>
38 # Copyright (C) 2001-2003 Craig Barratt
40 # This program is free software; you can redistribute it and/or modify
41 # it under the terms of the GNU General Public License as published by
42 # the Free Software Foundation; either version 2 of the License, or
43 # (at your option) any later version.
45 # This program is distributed in the hope that it will be useful,
46 # but WITHOUT ANY WARRANTY; without even the implied warranty of
47 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
48 # GNU General Public License for more details.
50 # You should have received a copy of the GNU General Public License
51 # along with this program; if not, write to the Free Software
52 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
54 #========================================================================
56 # Version 2.1.0, released 20 Jun 2004.
58 # See http://backuppc.sourceforge.net.
60 #========================================================================
64 use lib "__INSTALLDIR__/lib";
69 use BackupPC::Attrib qw(:all);
70 use BackupPC::FileZIO;
72 use BackupPC::SearchLib;
73 use Time::HiRes qw/time/;
74 use POSIX qw/strftime/;
78 use Data::Dumper; ### FIXME
# Initialize the BackupPC server library and load its configuration.
# NOTE(review): declaring `my $bpc` inside a statement-modifier condition is
# legal but fragile Perl; the variable is only reliably set when new() succeeds.
80 die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
81 my $TopDir = $bpc->TopDir();
82 my $BinDir = $bpc->BinDir();
83 my %Conf = $bpc->Conf();
# Mirror the loaded configuration into the SearchLib package global so that
# BackupPC::SearchLib helpers (e.g. getGzipName below) see the same settings.
84 %BackupPC::SearchLib::Conf = %Conf;
# Cache of files belonging to this increment, keyed by path (filled by
# seedCache() below); value is the uncompressed file size.
86 my $in_backup_increment;
89 if ( !getopts("th:n:p:r:s:b:w:vd", \%opts) ) {
93 -h host host from which the tar archive is created
94 -n dumpNum dump number from which the tar archive is created
95 A negative number means relative to the end (eg -1
96 means the most recent dump, -2 2nd most recent etc).
97 -s shareName share name from which the tar archive is created
100 -t print summary totals
101 -r pathRemove path prefix that will be replaced with pathAdd
102 -p pathAdd new path prefix
103 -b BLOCKS BLOCKS x 512 bytes per record (default 20; same as tar)
104 -w writeBufSz write buffer size (default 1048576 = 1MB)
# Untaint/validate the mandatory -h and -n options; die on anything that does
# not look like a host name or a (possibly negative) dump number.
111 if ( $opts{h} !~ /^([\w\.\s-]+)$/ ) {
112 die "$0: bad host name '$opts{h}'\n";
116 if ( $opts{n} !~ /^(-?\d+)$/ ) {
117 die "$0: bad dump number '$opts{n}'\n";
# Locate the external binaries needed for the compress/tee/checksum pipeline;
# abort early if any of them is not installed.
122 foreach my $c (qw/gzip md5sum tee/) {
123 $bin->{$c} = which($c) || die "$0 needs $c, install it\n";
# Read the list of existing backups for this host and resolve the requested
# dump number (a negative -n counts back from the most recent backup).
126 my @Backups = $bpc->BackupInfoRead($Host);
# Running totals: bytes written into the current tar part, and the cumulative
# (disk-block rounded) size of all parts of this increment.
132 my $current_tar_size = 0;
133 my $total_increment_size = 0;
136 $Num = $Backups[@Backups + $Num]{num} if ( -@Backups <= $Num && $Num < 0 );
137 for ( $i = 0 ; $i < @Backups ; $i++ ) {
138 last if ( $Backups[$i]{num} == $Num );
140 if ( $i >= @Backups ) {
141 die "$0: bad backup number $Num for host $Host\n";
# Optional path relocation: strip $PathRemove from archived paths and prepend
# $PathAdd (see -r / -p in the usage text above).
# NOTE(review): `my $x = $1 if COND;` is documented as undefined behavior in
# perlsyn ("my" with a statement modifier) — it happens to work here but
# should not be imitated.
144 my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
145 my $PathAdd = $1 if ( $opts{p} =~ /(.+)/ );
146 if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ && $opts{s} ne "*" ) {
147 die "$0: bad share name '$opts{s}'\n";
# Package globals (our) so the subs defined later in the file can see them.
149 our $ShareName = $opts{s};
150 our $view = BackupPC::View->new($bpc, $Host, \@Backups);
# Connect to the search database. AutoCommit is off, so all backup_parts /
# size updates below are committed in one transaction at the end of the run;
# RaiseError makes DBI failures fatal (and thus routed to abort_cleanup via
# the __DIE__ handler installed later).
154 my $dsn = $Conf{SearchDSN};
155 my $db_user = $Conf{SearchUser} || '';
157 my $dbh = DBI->connect($dsn, $db_user, "", { RaiseError => 1, AutoCommit => 0} );
159 my $sth_inc_size = $dbh->prepare(qq{
165 my $sth_backup_parts = $dbh->prepare(qq{
166 insert into backup_parts (
173 ) values (?,?,?,?,?,?)
177 # This constant and the line of code below that uses it are borrowed
178 # from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander.
181 # Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
182 # Copyright 1998 Stephen Zander. All rights reserved.
# pack() template for a 512-byte ustar header (name, mode, uid, gid, size,
# mtime, checksum, type, linkname, magic, version, uname, gname, devmajor,
# devminor, prefix, 12 bytes padding).
185 = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
186 my $tar_header_length = 512;
# Read-buffer size for file contents (-w) and tar record/write-buffer size
# (-b, expressed in 512-byte blocks, default 20 — same as GNU tar).
188 my $BufSize = $opts{w} || 1048576; # 1MB or 2^20
190 my $WriteBufSz = ($opts{b} || 20) * $tar_header_length;
# Memoization caches for uid/gid -> name lookups, and bookkeeping for
# hardlinks encountered while walking the backup view.
192 my(%UidCache, %GidCache);
193 my(%HardLinkExtraFiles, @HardLinks);
196 # Write out all the requested files/directories
# MaxArchiveFileSize is configured in KB, hence the *= 1024; it bounds the
# uncompressed size of a single tar part before we split into a new part.
# (sic: "parametar" typo is in the original runtime message)
199 my $max_file_size = $Conf{'MaxArchiveFileSize'} || die "problem with MaxArchiveFileSize parametar";
200 $max_file_size *= 1024;
# Working directory for the gzip'd parts; written as <name>.tmp and renamed
# only after a successful run (see the '.tmp' suffix below).
202 my $tar_dir = $Conf{InstallDir}.'/'.$Conf{GzipTempDir};
203 die "problem with $tar_dir, check GzipTempDir in configuration\n" unless (-d $tar_dir && -w $tar_dir);
205 my $tar_file = BackupPC::SearchLib::getGzipName($Host, $ShareName, $Num) || die "can't getGzipName($Host, $ShareName, $Num)";
207 my $tar_path = $tar_dir . '/' . $tar_file . '.tmp';
208 $tar_path =~ s#//#/#g;
# Look up the database id of this (host, share, dump number) backup so the
# backup_parts rows and size update can reference it.
210 my $sth = $dbh->prepare(qq{
214 JOIN shares on shares.id = shareid
215 JOIN hosts on hosts.id = shares.hostid
216 WHERE hosts.name = ? and shares.name = ? and backups.num = ?
218 $sth->execute($Host, $ShareName, $Num);
219 my ($backup_id) = $sth->fetchrow_array;
222 print STDERR "backup_id: $backup_id working dir: $tar_dir, max uncompressed size $max_file_size bytes, tar $tar_file\n" if ($opts{d});
# --- fragments of the part-rotation logic (new_tar_part / close helpers) ---
# NOTE(review): the enclosing sub declaration(s) are not visible in this view.
228 my $items_in_part = 0;
# Nothing was written into the current part: there is nothing to close.
234 return if ($current_tar_size == 0);
236 print STDERR "# closing part $part\n" if ($opts{d});
239 # Finish with two null 512 byte headers,
240 # and then round out a full block.
242 my $data = "\0" x ($tar_header_length * 2);
243 TarWrite($fh, \$data);
244 TarWrite($fh, undef);
# Closing the pipe flushes gzip/tee/md5sum; a failure here means the part on
# disk is incomplete, so it is fatal.
246 close($fh) || die "can't close archive part $part: $!";
248 my $file = $tar_path . '/' . $part;
# Record this part in the database: its md5 checksum (written by the md5sum
# end of the pipe) and compressed on-disk size.
250 my $md5 = read_file( $file . '.md5' ) || die "can't read md5sum file ${file}.md5";
253 my $size = (stat( $file . '.tar.gz' ))[7] || die "can't stat ${file}.tar.gz";
255 $sth_backup_parts->execute(
# Round the accounted size up to whole 1 KB blocks.
264 $total_increment_size += int( ( $size + 1023 ) / 1024 ) * 1024;
267 print STDERR "# close last part\n" if ($opts{d});
268 $sth_inc_size->execute(
269 $total_increment_size,
280 # if this is first part, create directory
284 print STDERR "# deleting existing $tar_path\n" if ($opts{d});
287 mkdir($tar_path) || die "can't create directory $tar_path: $!";
290 print STDERR "ABORTED: cleanup temp dir";
# Route interrupts and any die() through abort_cleanup so partially written
# .tmp output is removed rather than left behind.
296 $SIG{'INT'} = \&abort_cleanup;
297 $SIG{'QUIT'} = \&abort_cleanup;
298 $SIG{'__DIE__'} = \&abort_cleanup;
# --- fragment: open the output pipeline for a new archive part ---
302 my $file = $tar_path . '/' . $part;
305 # create complex pipe which will pass output through gzip
306 # for compression, create file on disk using tee
307 # and pipe same output to md5sum to create checksum
310 my $cmd = '| ' . $bin->{'gzip'} . ' ' . $Conf{GzipLevel} . ' ' .
311 '| ' . $bin->{'tee'} . ' ' . $file . '.tar.gz' . ' ' .
312 '| ' . $bin->{'md5sum'} . ' - > ' . $file . '.md5';
314 print STDERR "## $cmd\n" if ($opts{d});
# NOTE(review): 2-arg open of an interpolated shell pipeline — shell
# metacharacters in $tar_path would be interpreted by the shell; a list-form
# open('-|', ...) or IPC::Open2-style pipeline would be safer.
316 open($fh, $cmd) or die "can't open $cmd: $!";
# Reset the uncompressed byte counter for the new part.
319 $current_tar_size = 0;
# Main driver: load the increment's file list from the database, then walk
# the backup view writing everything (plus deferred hardlinks) into the
# part pipeline.
325 if (seedCache($Host, $ShareName, $Num)) {
326 archiveWrite($fh, '/');
327 archiveWriteHardLinks($fh);
329 print STDERR "NOTE: no files found for $Host:$ShareName, increment $Num\n" if ($opts{v});
# Flush and finalize the last (possibly only) part.
333 new_tar_part( close => 1 );
335 # remove temporary files if there are no files
338 } elsif ($part == 1) {
339 warn "FIXME: if there is only one part move to parent directory and rename";
343 # print out totals if requested
346 print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
347 " $SpecialCnt specials, $ErrorCnt errors\n";
349 if ( $ErrorCnt && !$FileCnt && !$DirCnt ) {
351 # Got errors, with no files or directories; exit with non-zero
# die() goes through the __DIE__ handler installed above, which also cleans
# up the temporary part directory.
354 die "got errors or no files\n";
# Release statement handles and commit all backup_parts/size rows at once
# (AutoCommit was disabled at connect time).
357 $sth_inc_size->finish;
358 $sth_backup_parts->finish;
360 $dbh->commit || die "can't commit changes to database";
365 ###########################################################################
367 ###########################################################################
# --- fragment of archiveWrite($fh, $dir, $tarPathOverride): walk one
# directory of the backup view, emitting each entry via TarWriteFile.
# (The `sub` line itself is outside this view.)
371 my($fh, $dir, $tarPathOverride) = @_;
# Refuse '..' path components to prevent escaping the share root.
373 if ( $dir =~ m{(^|/)\.\.(/|$)} ) {
374 print(STDERR "$0: bad directory '$dir'\n");
378 $dir = "/" if ( $dir eq "." );
379 #print(STDERR "calling find with $Num, $ShareName, $dir\n");
# View::find invokes TarWriteFile for every entry; a negative return means
# the share/directory does not exist in this dump.
381 if ( $view->find($Num, $ShareName, $dir, 0, \&TarWriteFile,
382 $fh, $tarPathOverride) < 0 ) {
383 print(STDERR "$0: bad share or directory '$ShareName/$dir'\n");
390 # Write out any hardlinks (if any)
392 sub archiveWriteHardLinks
# Emit the hardlink headers deferred by TarWriteFile, applying the same
# -r/-p path relocation to each link target first.
395 foreach my $hdr ( @HardLinks ) {
397 if ( defined($PathRemove)
398 && substr($hdr->{linkname}, 0, length($PathRemove)+1)
399 eq ".$PathRemove" ) {
400 substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
402 TarWriteFileInfo($fh, $hdr);
# Reset hardlink bookkeeping for any subsequent archive run.
405 %HardLinkExtraFiles = ();
# --- fragments of UidLookup/GidLookup: memoized uid/gid -> name lookups ---
# Note: getpwuid/getgrgid may return undef for unknown ids; the undef is
# cached too, so each id is only looked up once per run.
412 $UidCache{$uid} = (getpwuid($uid))[0] if ( !exists($UidCache{$uid}) );
413 return $UidCache{$uid};
420 $GidCache{$gid} = (getgrgid($gid))[0] if ( !exists($GidCache{$gid}) );
421 return $GidCache{$gid};
# --- fragment of TarWrite($fh, \$data): buffered writer that emits output
# in $WriteBufSz-sized records. Passing undef flushes the buffer, padding
# with NULs to a full record (how tar archives are terminated).
426 my($fh, $dataRef) = @_;
429 if ( !defined($dataRef) ) {
431 # do flush by padding to a full $WriteBufSz
433 my $data = "\0" x ($WriteBufSz - length($WriteBuf));
437 # poor man's tell :-)
438 $current_tar_size += length($$dataRef);
440 if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
442 # just buffer and return
444 $WriteBuf .= $$dataRef;
# Buffer would overflow: write one full record made of the pending buffer
# plus the head of the new data, then stream full records directly, and
# keep the tail in $WriteBuf.
447 my $done = $WriteBufSz - length($WriteBuf);
448 if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
450 die "Unable to write to output file ($!)\n";
452 while ( $done + $WriteBufSz <= length($$dataRef) ) {
453 if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
455 die "Unable to write to output file ($!)\n";
457 $done += $WriteBufSz;
459 $WriteBuf = substr($$dataRef, $done);
# --- fragment of TarWritePad($fh, $size): pad file data with NULs up to the
# next 512-byte tar header boundary.
466 if ( $size % $tar_header_length ) {
467 my $data = "\0" x ($tar_header_length - ($size % $tar_header_length));
468 TarWrite($fh, \$data);
# --- fragment of TarWriteHeader($fh, $hdr): pack and emit one 512-byte
# ustar header for the entry described by %$hdr.
476 $hdr->{uname} = UidLookup($hdr->{uid}) if ( !defined($hdr->{uname}) );
477 $hdr->{gname} = GidLookup($hdr->{gid}) if ( !defined($hdr->{gname}) );
478 my $devmajor = defined($hdr->{devmajor}) ? sprintf("%07o", $hdr->{devmajor})
480 my $devminor = defined($hdr->{devminor}) ? sprintf("%07o", $hdr->{devminor})
# The 12-char size field can't hold >= 8 GB in octal; use the GNU base-256
# extension (high bit set, big-endian binary) for those, and split the
# octal digits manually for 4-8 GB since sprintf %o tops out at 2^32 - 1.
483 if ( $hdr->{size} >= 2 * 65536 * 65536 ) {
485 # GNU extension for files >= 8GB: send size in big-endian binary
487 $sizeStr = pack("c4 N N", 0x80, 0, 0, 0,
488 $hdr->{size} / (65536 * 65536),
489 $hdr->{size} % (65536 * 65536));
490 } elsif ( $hdr->{size} >= 1 * 65536 * 65536 ) {
492 # sprintf octal only handles up to 2^32 - 1
494 $sizeStr = sprintf("%03o", $hdr->{size} / (1 << 24))
495 . sprintf("%08o", $hdr->{size} % (1 << 24));
497 $sizeStr = sprintf("%011o", $hdr->{size});
# Pack all header fields using the Archive::Tar-derived template; names and
# linknames are truncated to 99 chars (long names are handled separately via
# @LongLink entries in TarWriteFileInfo).
499 my $data = pack($tar_pack_header,
500 substr($hdr->{name}, 0, 99),
501 sprintf("%07o", $hdr->{mode}),
502 sprintf("%07o", $hdr->{uid}),
503 sprintf("%07o", $hdr->{gid}),
505 sprintf("%011o", $hdr->{mtime}),
506 "", #checksum field - space padded by pack("A8")
508 substr($hdr->{linkname}, 0, 99),
509 $hdr->{magic} || 'ustar ',
510 $hdr->{version} || ' ',
# Header checksum: unsigned byte sum of the header with the checksum field
# space-filled (which pack's A8 already did), written back at offset 148.
517 substr($data, 148, 7) = sprintf("%06o\0", unpack("%16C*",$data));
518 TarWrite($fh, \$data);
# --- fragment of TarWriteFileInfo($fh, $hdr): emit GNU @LongLink pseudo
# entries for link targets / file names longer than the 99 chars that fit
# in a ustar header, then the real header for the entry itself.
526 # Handle long link names (symbolic links)
528 if ( length($hdr->{linkname}) > 99 ) {
530 my $data = $hdr->{linkname} . "\0";
531 $h{name} = "././\@LongLink";
533 $h{size} = length($data);
534 TarWriteHeader($fh, \%h);
535 TarWrite($fh, \$data);
536 TarWritePad($fh, length($data));
539 # Handle long file names
541 if ( length($hdr->{name}) > 99 ) {
543 my $data = $hdr->{name} . "\0";
544 $h{name} = "././\@LongLink";
546 $h{size} = length($data);
547 TarWriteHeader($fh, \%h);
548 TarWrite($fh, \$data);
549 TarWritePad($fh, length($data));
# Finally the real header (TarWriteHeader truncates name/linkname to 99
# chars; the @LongLink entries above carry the full values).
551 TarWriteHeader($fh, $hdr);
555 # seed cache of files in this increment
# --- fragment of seedCache($host, $share, $dumpNo): load the list of files
# belonging to this increment from the search database into the
# $in_backup_increment hash (path -> size) used by TarWriteFile to decide
# which entries from the view actually belong in this incremental archive.
558 my ($host, $share, $dumpNo) = @_;
560 print STDERR curr_time(), "getting files for $host:$share increment $dumpNo..." if ($opts{v});
564 JOIN shares on shares.id = shareid
565 JOIN hosts on hosts.id = shares.hostid
566 WHERE hosts.name = ? and shares.name = ? and backupnum = ?
# NOTE(review): DBI documents $sth->rows as unreliable for SELECT statements
# until all rows are fetched — the count printed here may be -1 or driver
# dependent; verify against the driver in use.
569 my $sth = $dbh->prepare($sql);
570 $sth->execute($host, $share, $dumpNo);
571 my $count = $sth->rows;
572 print STDERR " found $count items\n" if ($opts{v});
573 while (my $row = $sth->fetchrow_arrayref) {
574 #print STDERR "+ ", $row->[0],"\n";
575 $in_backup_increment->{ $row->[0] } = $row->[1];
584 # calculate overhead for one file in tar
# Returns an upper bound on the tar bookkeeping bytes for one file: its
# header, data padding, the two terminating null blocks, and (for names over
# 99 chars) the extra @LongLink header-and-data blocks.
# NOTE(review): the ($) prototype affects parsing, not argument validation.
586 sub tar_overhead($) {
587 my $name = shift || '';
589 # header, padding of file and two null blocks at end
590 my $len = 4 * $tar_header_length;
592 # if filename is longer than 99 chars subtract blocks for
594 if ( length($name) > 99 ) {
595 $len += int( ( length($name) + $tar_header_length ) / $tar_header_length ) * $tar_header_length;
# --- fragment of TarWriteFile($hdr, $fh, $tarPathOverride): callback invoked
# by View::find for every entry in the dump; writes the entry into the tar
# stream, dispatching on file type, and rotates to a new part when the
# current part would exceed MaxArchiveFileSize.
606 my($hdr, $fh, $tarPathOverride) = @_;
608 my $tarPath = $hdr->{relPath};
609 $tarPath = $tarPathOverride if ( defined($tarPathOverride) );
611 $tarPath =~ s{//+}{/}g;
613 #print STDERR "? $tarPath\n" if ($opts{d});
# Skip any entry not listed in this increment (seedCache filled the hash).
614 my $size = $in_backup_increment->{$tarPath};
615 return unless (defined($size));
617 # is this file too large to fit into MaxArchiveFileSize?
619 if ( ($current_tar_size + tar_overhead($tarPath) + $size) > $max_file_size ) {
620 print STDERR "# tar file $current_tar_size + $tar_header_length + $size > $max_file_size, splitting\n" if ($opts{d});
624 #print STDERR "A $tarPath [$size] tell: $current_tar_size\n" if ($opts{d});
# Apply -r/-p relocation and normalize to a ./-relative path.
627 if ( defined($PathRemove)
628 && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
629 substr($tarPath, 0, length($PathRemove)) = $PathAdd;
631 $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
632 $tarPath =~ s{//+}{/}g;
633 $hdr->{name} = $tarPath;
635 if ( $hdr->{type} == BPC_FTYPE_DIR ) {
637 # Directory: just write the header
639 $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
640 TarWriteFileInfo($fh, $hdr);
642 } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
644 # Regular file: write the header and file
646 my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
647 if ( !defined($f) ) {
648 print(STDERR "Unable to open file $hdr->{fullPath}\n");
652 # do we need to split file?
# Small enough to fit in one part: stream the whole file.
653 if ($hdr->{size} < $max_file_size) {
654 TarWriteFileInfo($fh, $hdr);
656 while ( $f->read(\$data, $BufSize) > 0 ) {
657 TarWrite($fh, \$data);
658 $size += length($data);
661 TarWritePad($fh, $size);
# File larger than a single part: split into N chunks, each archived under
# "<name>/<subpart>" so the pieces can be reassembled on extraction.
665 my $full_size = $hdr->{size};
666 my $orig_name = $hdr->{name};
667 my $max_part_size = $max_file_size - tar_overhead($hdr->{name});
669 my $parts = int(($full_size + $max_part_size - 1) / $max_part_size);
670 print STDERR "# splitting $orig_name [$full_size bytes] into $parts parts\n" if ($opts{d});
671 foreach my $subpart ( 1 .. $parts ) {
673 if ($subpart < $parts) {
674 $hdr->{size} = $max_part_size;
676 $hdr->{size} = $full_size % $max_part_size;
678 $hdr->{name} = $orig_name . '/' . $subpart;
679 print STDERR "## creating part $subpart ",$hdr->{name}, " [", $hdr->{size}," bytes]\n";
681 TarWriteFileInfo($fh, $hdr);
# Read exactly this chunk's bytes: full buffers first, then the remainder.
684 for ( 1 .. int($hdr->{size} / $BufSize) ) {
685 my $r_size = $f->read(\$data, $BufSize);
686 die "expected $BufSize bytes read, got $r_size bytes!" if ($r_size != $BufSize);
687 TarWrite($fh, \$data);
688 $size += length($data);
691 my $size_left = $hdr->{size} % $BufSize;
692 my $r_size = $f->read(\$data, $size_left);
693 die "expected $size_left bytes last read, got $r_size bytes!" if ($r_size != $size_left);
695 TarWrite($fh, \$data);
696 $size += length($data);
697 TarWritePad($fh, $size);
703 $ByteCnt += $full_size;
706 } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
708 # Hardlink file: either write a hardlink or the complete file
709 # depending upon whether the linked-to file will be written
712 # Start by reading the contents of the link.
714 my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
715 if ( !defined($f) ) {
716 print(STDERR "Unable to open file $hdr->{fullPath}\n");
721 while ( $f->read(\$data, $BufSize) > 0 ) {
722 $hdr->{linkname} .= $data;
726 my $name = $hdr->{linkname};
728 if ( $HardLinkExtraFiles{$name} ) {
730 # Target file will be or was written, so just remember
731 # the hardlink so we can dump it later.
733 push(@HardLinks, $hdr);
737 # Have to dump the original file. Just call the top-level
738 # routine, so that we save the hassle of dealing with
739 # mangling, merging and attributes.
741 $HardLinkExtraFiles{$hdr->{linkname}} = 1;
742 archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
744 } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
746 # Symbolic link: read the symbolic link contents into the header
747 # and write the header.
749 my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0, $hdr->{compress});
750 if ( !defined($f) ) {
751 print(STDERR "Unable to open symlink file $hdr->{fullPath}\n");
756 while ( $f->read(\$data, $BufSize) > 0 ) {
757 $hdr->{linkname} .= $data;
761 TarWriteFileInfo($fh, $hdr);
763 } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
764 || $hdr->{type} == BPC_FTYPE_BLOCKDEV
765 || $hdr->{type} == BPC_FTYPE_FIFO ) {
767 # Special files: for char and block special we read the
768 # major and minor numbers from a plain file.
770 if ( $hdr->{type} != BPC_FTYPE_FIFO ) {
771 my $f = BackupPC::FileZIO->open($hdr->{fullPath}, 0,
774 if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) {
775 print(STDERR "Unable to open/read char/block special file"
776 . " $hdr->{fullPath}\n");
777 $f->close if ( defined($f) );
# The stored file contains "major,minor" as decimal text.
782 if ( $data =~ /(\d+),(\d+)/ ) {
783 $hdr->{devmajor} = $1;
784 $hdr->{devminor} = $2;
788 TarWriteFileInfo($fh, $hdr);
# Fallthrough: unrecognized entry type — report and count as error.
791 print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
# --- fragment of curr_time(): current local time formatted for log lines.
796 my $t_fmt = '%Y-%m-%d %H:%M:%S';
798 return strftime($t_fmt,localtime());