# Craig Barratt <cbarratt@users.sourceforge.net>
#
# COPYRIGHT
-# Copyright (C) 2002 Craig Barratt
+# Copyright (C) 2002-2003 Craig Barratt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
#
#========================================================================
#
-# Version 1.6.0_CVS, released 10 Dec 2002.
+# Version 3.0.0, released 28 Jan 2007.
#
# See http://backuppc.sourceforge.net.
#
use BackupPC::Lib;
use BackupPC::Attrib qw(:all);
use BackupPC::FileZIO;
+use Data::Dumper;
sub new
{
}, $class;
for ( my $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
next if ( defined($m->{backups}[$i]{level}) );
- $m->{backups}[$i]{level} = $m->{backups}[$i]{type} eq "full" ? 0 : 1;
+ $m->{backups}[$i]{level} = $m->{backups}[$i]{type} eq "incr" ? 1 : 0;
}
$m->{topDir} = $m->{bpc}->TopDir();
return $m;
my($m, $backupNum, $share, $dir) = @_;
my($i, $level);
+ #print STDERR "dirCache($backupNum, $share, $dir)\n";
$dir = "/$dir" if ( $dir !~ m{^/} );
$dir =~ s{/+$}{};
return if ( $m->{num} == $backupNum
&& $m->{share} eq $share
+ && defined($m->{dir})
&& $m->{dir} eq $dir );
- if ( $m->{num} != $backupNum ) {
- for ( $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
- last if ( $m->{backups}[$i]{num} == $backupNum );
- }
- if ( $i >= @{$m->{backups}} ) {
- $m->{idx} = -1;
- return;
- }
- $m->{num} = $backupNum;
- $m->{idx} = $i;
- }
+ $m->backupNumCache($backupNum) if ( $m->{num} != $backupNum );
+ return if ( $m->{idx} < 0 );
+
$m->{files} = {};
$level = $m->{backups}[$m->{idx}]{level} + 1;
#
$m->{mergeNums} = [];
for ( $i = $m->{idx} ; $level > 0 && $i >= 0 ; $i-- ) {
- #print("Do $i ($m->{backups}[$i]{noFill},$m->{backups}[$i]{level})\n");
+ #print(STDERR "Do $i ($m->{backups}[$i]{noFill},$m->{backups}[$i]{level})\n");
#
# skip backups with the same or higher level
#
$sharePathM = $share . $dir;
}
$path .= $sharePathM;
- #print("Opening $path\n");
+ #print(STDERR "Opening $path (share=$share)\n");
if ( !opendir(DIR, $path) ) {
if ( $i == $m->{idx} ) {
#
}
}
foreach my $file ( @dir ) {
- $file = $1 if ( $file =~ /(.*)/ );
+ $file = $1 if ( $file =~ /(.*)/s );
my $fileUM = $file;
$fileUM = $m->{bpc}->fileNameUnmangle($fileUM) if ( $mangle );
+ #print(STDERR "Doing $fileUM\n");
#
# skip special files
#
next if ( defined($m->{files}{$fileUM})
|| $file eq ".."
|| $file eq "."
+ || $file eq "backupInfo"
|| $mangle && $file eq "attrib" );
#
# skip directories in earlier backups (each backup always
}
}
}
- $m->{files}{$fileUM}{relPath} = "$dir/$fileUM";
- $m->{files}{$fileUM}{sharePathM} = "$sharePathM/$file";
- $m->{files}{$fileUM}{fullPath} = "$path/$file";
- $m->{files}{$fileUM}{backupNum} = $backupNum;
- $m->{files}{$fileUM}{compress} = $compress;
- $m->{files}{$fileUM}{nlink} = $s[3];
- $m->{files}{$fileUM}{inode} = $s[1];
+ ($m->{files}{$fileUM}{relPath} = "$dir/$fileUM") =~ s{//+}{/}g;
+ ($m->{files}{$fileUM}{sharePathM} = "$sharePathM/$file")
+ =~ s{//+}{/}g;
+ ($m->{files}{$fileUM}{fullPath} = "$path/$file") =~ s{//+}{/}g;
+ $m->{files}{$fileUM}{backupNum} = $backupNum;
+ $m->{files}{$fileUM}{compress} = $compress;
+ $m->{files}{$fileUM}{nlink} = $s[3];
+ $m->{files}{$fileUM}{inode} = $s[1];
}
#
# Also include deleted files
next if ( $m->{files}{$file}{type} != BPC_FTYPE_DELETED );
delete($m->{files}{$file});
}
+ #print STDERR "Returning:\n", Dumper($m->{files});
+}
+
+#
+# Return list of shares for this backup
+#
+sub shareList
+{
+    my($m, $backupNum) = @_;
+    my @shareList;
+
+    #
+    # Point the index cache at this backup number; a negative
+    # index means the backup number wasn't found.
+    #
+    $m->backupNumCache($backupNum) if ( $m->{num} != $backupNum );
+    return if ( $m->{idx} < 0 );
+
+    my $mangle = $m->{backups}[$m->{idx}]{mangle};
+    my $path = "$m->{topDir}/pc/$m->{host}/$backupNum/";
+    return if ( !opendir(DIR, $path) );
+    my @dir = readdir(DIR);
+    closedir(DIR);
+    foreach my $file ( @dir ) {
+        # untaint the file name via the regex capture
+        $file = $1 if ( $file =~ /(.*)/s );
+        #
+        # skip ".", "..", backupInfo, and (when names are mangled)
+        # the attrib file
+        #
+        next if ( $file eq "attrib" && $mangle
+               || $file eq "."
+               || $file eq ".."
+               || $file eq "backupInfo"
+            );
+        my $fileUM = $file;
+        # unmangle the on-disk name back to the real share name
+        $fileUM = $m->{bpc}->fileNameUnmangle($fileUM) if ( $mangle );
+        push(@shareList, $fileUM);
+    }
+    # forget the cached dir so a later dirCache() call reloads it
+    $m->{dir} = undef;
+    return @shareList;
+}
+
+#
+# Look up backup number $backupNum in $m->{backups} and cache the
+# result in $m->{num} / $m->{idx}.  Sets $m->{idx} to -1 when the
+# backup number doesn't exist.
+#
+sub backupNumCache
+{
+    my($m, $backupNum) = @_;
+
+    if ( $m->{num} != $backupNum ) {
+        my $i;
+        # linear scan for the requested backup number
+        for ( $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
+            last if ( $m->{backups}[$i]{num} == $backupNum );
+        }
+        if ( $i >= @{$m->{backups}} ) {
+            # not found: flag with a negative index
+            $m->{idx} = -1;
+            return;
+        }
+        $m->{num} = $backupNum;
+        $m->{idx} = $i;
+    }
}
#
sub fileAttrib
{
    my($m, $backupNum, $share, $path) = @_;
-    my $dir = $path;
-    $dir =~ s{(.*)/(.*)}{$1};
-    my $file = $2;
-    $m->dirCache($backupNum, $share, $dir);
-    return $m->{files}{$file};
+    #print(STDERR "fileAttrib($backupNum, $share, $path)\n");
+    #
+    # Split $path into the parent directory ($path after the s///)
+    # and the final file name; the /s modifier makes the split work
+    # for names that contain newlines.
+    #
+    if ( $path =~ s{(.*)/+(.+)}{$1}s ) {
+        my $file = $2;
+        $m->dirCache($backupNum, $share, $path);
+        return $m->{files}{$file};
+    } else {
+        #print STDERR "Got empty $path\n";
+        #
+        # No file component: return the attributes of the share
+        # itself by looking it up in the backup's top-level dir.
+        #
+        $m->dirCache($backupNum, "", "");
+        my $attr = $m->{files}{$share};
+        return if ( !defined($attr) );
+        $attr->{relPath} = "/";
+        return $attr;
+    }
}
#
return $m->{files};
}
+#
+# Return a listref of backup numbers that are merged to create this view
+# (populated by the most recent dirCache() call).
+#
sub mergeNums
{
    my($m) = @_;
    return $m->{mergeNums};
}
+#
+# Return a list of backup indexes for which the directory exists
+#
sub backupList
{
my($m, $share, $dir) = @_;
}
$path .= $sharePathM;
next if ( !-d $path );
- push(@backupList, $backupNum);
+ push(@backupList, $i);
}
return @backupList;
}
+#
+# Return the history of all backups for a particular directory
+# and share.  The result is a hashref mapping each (unmangled)
+# file name to an array of attribute hashes indexed by backup
+# index; an undef entry means the file wasn't present (or was
+# deleted) in that backup.
+#
+sub dirHistory
+{
+    my($m, $share, $dir) = @_;
+    my($i, $level);
+    my $files = {};
+
+    #
+    # Normalize the directory: ensure a leading "/" and no trailing "/".
+    #
+    $dir = "/$dir" if ( $dir !~ m{^/} );
+    $dir =~ s{/+$}{};
+
+    #
+    # merge backups, starting at the first one, and working
+    # forward.
+    #
+    for ( $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
+        $level = $m->{backups}[$i]{level};
+        my $backupNum = $m->{backups}[$i]{num};
+        my $mangle = $m->{backups}[$i]{mangle};
+        my $compress = $m->{backups}[$i]{compress};
+        my $path = "$m->{topDir}/pc/$m->{host}/$backupNum/";
+        my $sharePathM;
+        if ( $mangle ) {
+            $sharePathM = $m->{bpc}->fileNameEltMangle($share)
+                        . $m->{bpc}->fileNameMangle($dir);
+        } else {
+            $sharePathM = $share . $dir;
+        }
+        $path .= $sharePathM;
+        #print(STDERR "Opening $path (share=$share)\n");
+        if ( !opendir(DIR, $path) ) {
+            #
+            # Oops, directory doesn't exist.
+            #
+            next;
+        }
+        my @dir = readdir(DIR);
+        closedir(DIR);
+        my $attr;
+        if ( $mangle ) {
+            #
+            # Read this directory's attrib file, if present.
+            #
+            $attr = BackupPC::Attrib->new({ compress => $compress });
+            if ( -f $attr->fileName($path) && !$attr->read($path) ) {
+                $m->{error} = "Can't read attribute file in $path";
+                $attr = undef;
+            }
+        }
+        foreach my $file ( @dir ) {
+            # untaint the file name via the regex capture
+            $file = $1 if ( $file =~ /(.*)/s );
+            my $fileUM = $file;
+            $fileUM = $m->{bpc}->fileNameUnmangle($fileUM) if ( $mangle );
+            #print(STDERR "Doing $fileUM\n");
+            #
+            # skip special files
+            #
+            next if ( $file eq ".."
+                   || $file eq "."
+                   || $mangle && $file eq "attrib"
+                   || defined($files->{$fileUM}[$i]) );
+            my @s = stat("$path/$file");
+            if ( defined($attr) && defined(my $a = $attr->get($fileUM)) ) {
+                $files->{$fileUM}[$i] = $a;
+                $attr->set($fileUM, undef);
+            } else {
+                #
+                # Very expensive in the non-attribute case when compression
+                # is on. We have to stat the file and read compressed files
+                # to determine their size.
+                #
+                # The "_" filehandle below reuses the stat() result above.
+                #
+                $files->{$fileUM}[$i] = {
+                    type => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE,
+                    mode => $s[2],
+                    uid => $s[4],
+                    gid => $s[5],
+                    size => -f _ ? $s[7] : 0,
+                    mtime => $s[9],
+                };
+                if ( $compress && -f _ ) {
+                    #
+                    # Compute the correct size by reading the whole file
+                    #
+                    my $f = BackupPC::FileZIO->open("$path/$file",
+                                                    0, $compress);
+                    if ( !defined($f) ) {
+                        $m->{error} = "Can't open $path/$file";
+                    } else {
+                        my($data, $size);
+                        while ( $f->read(\$data, 65636 * 8) > 0 ) {
+                            $size += length($data);
+                        }
+                        $f->close;
+                        $files->{$fileUM}[$i]{size} = $size;
+                    }
+                }
+            }
+            # collapse any duplicate "/"s in the derived paths
+            ($files->{$fileUM}[$i]{relPath} = "$dir/$fileUM") =~ s{//+}{/}g;
+            ($files->{$fileUM}[$i]{sharePathM} = "$sharePathM/$file")
+                                                            =~ s{//+}{/}g;
+            ($files->{$fileUM}[$i]{fullPath} = "$path/$file") =~ s{//+}{/}g;
+            $files->{$fileUM}[$i]{backupNum} = $backupNum;
+            $files->{$fileUM}[$i]{compress} = $compress;
+            $files->{$fileUM}[$i]{nlink} = $s[3];
+            $files->{$fileUM}[$i]{inode} = $s[1];
+        }
+
+        #
+        # Flag deleted files
+        #
+        if ( defined($attr) ) {
+            my $a = $attr->get;
+            foreach my $fileUM ( keys(%$a) ) {
+                next if ( $a->{$fileUM}{type} != BPC_FTYPE_DELETED );
+                $files->{$fileUM}[$i]{type} = BPC_FTYPE_DELETED;
+            }
+        }
+
+        #
+        # Merge old backups. Don't merge directories from old
+        # backups because every backup has an accurate directory
+        # tree.
+        #
+        for ( my $k = $i - 1 ; $level > 0 && $k >= 0 ; $k-- ) {
+            next if ( $m->{backups}[$k]{level} >= $level );
+            $level = $m->{backups}[$k]{level};
+            foreach my $fileUM ( keys(%$files) ) {
+                next if ( !defined($files->{$fileUM}[$k])
+                       || defined($files->{$fileUM}[$i])
+                       || $files->{$fileUM}[$k]{type} == BPC_FTYPE_DIR );
+                $files->{$fileUM}[$i] = $files->{$fileUM}[$k];
+            }
+        }
+    }
+
+    #
+    # Remove deleted files
+    #
+    for ( $i = 0 ; $i < @{$m->{backups}} ; $i++ ) {
+        foreach my $fileUM ( keys(%$files) ) {
+            next if ( !defined($files->{$fileUM}[$i])
+                   || $files->{$fileUM}[$i]{type} != BPC_FTYPE_DELETED );
+            $files->{$fileUM}[$i] = undef;
+        }
+    }
+
+    #print STDERR "Returning:\n", Dumper($files);
+    return $files;
+}
+
+
#
# Do a recursive find starting at the given path (either a file
# or directory). The callback function $callback is called on each
{
my($m, $backupNum, $share, $path, $depth, $callback, @callbackArgs) = @_;
+ #print(STDERR "find: got $backupNum, $share, $path\n");
#
# First call the callback on the given $path
#
my $attr = $m->dirAttrib($backupNum, $share, $path);
return if ( !defined($attr) );
- foreach my $file ( keys(%$attr) ) {
+ foreach my $file ( sort(keys(%$attr)) ) {
&$callback($attr->{$file}, @callbackArgs);
next if ( !$depth || $attr->{$file}{type} != BPC_FTYPE_DIR );
#