--- /dev/null
+#========================================================================
+#
+# ChangeLog - change log for BackupPC.
+#
+# DESCRIPTION
+# Revision history for BackupPC, detailing significant changes between
+# versions, most recent first.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+#------------------------------------------------------------------------
+# Version 1.5.0, 2 Aug 2002
+#------------------------------------------------------------------------
+
+* Changed conf/config.pl so that $Conf{TarIncrArgs} uses the --newer
+ option instead of --newer-mtime. Also removed --atime-preserve from
+ $Conf{TarClientCmd}. This makes the default settings work better
+ with tripwire.
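+
+  For reference, a sketch of the resulting conf/config.pl settings
+  (argument strings are illustrative, not the verbatim defaults):
+
+      # incrementals now select files by date with --newer
+      $Conf{TarIncrArgs} = '--newer=$incrDate $fileList';
+
+      # client-side tar command, without --atime-preserve
+      $Conf{TarClientCmd} = '$sshPath -q -n -l root $host'
+                          . ' $tarPath -c -v -f - -C $shareName';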
+
+* Fixed configure.pl so that it correctly detects a running BackupPC
+  <= v1.4.0 and warns the user to stop it before upgrading. Reported
+ by David Holland.
+
+* Added missing ";" to entity escape in EscapeHTML in BackupPC_Admin.
+ Reported by Guillaume Filion.
+
+* Added LDAP setup to documentation from David Holland.
+
+* Tar.pm now adds a "." to file paths that start with "/", so that all
+ tar paths are relative. From Ludovic Drolez.
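+
+  In sketch form (the actual code in Tar.pm may differ slightly):
+
+      # "/etc/hosts" -> "./etc/hosts", so tar never sees absolute paths
+      $path = ".$path" if ( $path =~ m{^/} );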
+
+#------------------------------------------------------------------------
+# Version 1.5.0beta0, 30 Jun 2002
+#------------------------------------------------------------------------
+
+* A full set of restore options is now supported, including direct
+  restore via smbclient or tar, or downloading a zip or tar file.
+
+* Major additions to the CGI script to support better directory navigation,
+ restore features and mod_perl. Also, file downloads from the CGI
+ interface now correctly preserve the file name and provide the
+ correct Content-Type for the most common types of files. Improved
+ directory navigation was contributed by Ryan Kucera.
+
+* New script BackupPC_zipCreate (contributed by Guillaume Filion) is the
+ zip analog of BackupPC_tarCreate. BackupPC_zipCreate can be used to
+ create a zip archive of any portion of a backup.
+
+* Substantial additions to BackupPC_tarCreate to support restore,
+ including modifying path names, handling hardlinks, fixing
+ support of old backups without attributes (pre-v1.4.0). Plus
+  BackupPC_tarCreate is now an official part of the release.
+ (Lack of support for hardlinks was reported by John Stanley.)
+
+* BackupPC_tarExtract now supports hardlinks, and pooling of attribute
+  files has been fixed.
+
+* A unix domain socket is now used for communication between the CGI
+ interface and BackupPC. The original TCP socket is optional. Sockets
+ are correctly re-initialized if config.pl is updated with new socket
+ settings.
+
+* For improved security, messages over the unix or TCP socket are protected
+ via an MD5 digest based on a shared secret, a sequence number, a time
+ stamp and a unique per-connection number.
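+
+  A minimal sketch of the idea, with illustrative variable names (this
+  is not the actual protocol code):
+
+      use Digest::MD5 qw(md5_base64);
+
+      # Both ends share $secret; folding in a sequence number, a time
+      # stamp and a per-connection unique value means replayed or
+      # altered messages fail the receiver's digest check.
+      my $digest = md5_base64($secret, $seqNum, $timeStamp,
+                              $connUnique, $mesg);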
+
+* Additions to configure.pl to support installation of the directory
+  navigation images.
+
+* Fixed case where $Conf{BackupFilesOnly} or $Conf{BackupFilesExclude}
+  were set to a single string or list (in v1.4.0 only the hash case
+  worked correctly). Reported by Phillip Bertolus.
+
+* Fixed case of $Conf{BlackoutGoodCnt} == 0. This setting now makes the
+  client always subject to blackout, matching the comments in config.pl.
+  Also fixed handling of $Conf{BlackoutGoodCnt} < 0 in the CGI script,
+  reported by Pascal Schelcher.
+
+* Fixed byte and file totals for tar backups, reported by several users.
+
+* Fixed --newer-mtime date/timestamp format to make it ISO 8601 compliant,
+ suggested by Erminio Baranzini.
+
+* Fixed handling of $Conf{BackupFilesOnly} in BackupPC::Xfer::Tar.pm, as
+ well as shell escaping of tar arguments.
+
+* Fixed entity encoding of 8-bit characters in the CGI interface.
+
+* Added optional CGI headers via $Conf{CgiHeaders}, which by default
+  is set to a no-cache pragma. Suggested by Benno Zuure.
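+
+  For example (an illustrative value, not necessarily the shipped
+  default):
+
+      $Conf{CgiHeaders} = "pragma: no-cache\n";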
+
+#------------------------------------------------------------------------
+# Version 1.4.0, 16 Mar 2002
+#------------------------------------------------------------------------
+
+* BackupPC now supports tar (in addition to smb) for extracting host
+ data. This is the most convenient option for linux/unix hosts.
+  Tar can be configured to run over ssh or rsh, or to back up a local
+ nfs mount from the host.
+
+* Support for special files, including symbolic links, fifos, and
+  character and block device files, has been added, so that all native
+  linux/unix file types can be correctly backed up when using tar
+  transport. Special files are all stored as regular files, and the
+  type attributes are used to remember the original file type.
+
+* All unix file attributes are now saved (and pooled when possible).
+ This includes user and group ownership, permissions, and modification
+  time. Smbclient also does a reasonable job of emulating unix
+  attributes (such as mtime), and these attributes get saved too.
+
+* The new default is to not fill incremental dumps. configure.pl
+ automatically sets $Conf{IncrFill} to 0. The default was 1
+ (incrementals were filled with hardlinks). Since the CGI
+ script does filling at browsing time, there is no need to
+ fill incremental dumps.
+
+* Backup file names are now stored in "mangled" form. Each node of a
+ path is preceded by "f", and special characters (\n, \r, % and /) are
+ URI-encoded as "%xx", where xx is the ascii character's hex value. So
+ c:/craig/example.txt is now stored as fc/fcraig/fexample.txt. This
+ was done mainly so meta-data could be stored alongside the backup
+ files without name collisions. In particular, the attributes for the
+ files in a directory are stored in a file called "attrib", and
+ mangling avoids file name collisions (I discarded the idea of having
+ a duplicate directory tree for every backup just to store the
+ attributes). Other meta-data (eg: rsync checksums) could be stored in
+ file names preceded by, eg, "c". There are two other benefits to
+ mangling: the share name might contain "/" (eg: "/home/craig" for tar
+ transport), and I wanted that represented as a single level in the
+ storage tree. Secondly, as files are written to NewFileList for later
+ processing by BackupPC_link, embedded newlines in the file's path
+ will cause problems which are avoided by mangling.
+
+  The CGI script undoes the mangling, so it is invisible to the user.
+ Of course, old (unmangled) backups are still supported by the CGI
+ interface.
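+
+  A sketch of the per-component transform (the real code lives in
+  BackupPC::Lib; this version is illustrative):
+
+      # One path component -> its mangled form ("f" prefix, %xx escapes)
+      sub mangleOne
+      {
+          my($name) = @_;
+          $name =~ s{([%/\n\r])}{sprintf("%%%02x", ord($1))}eg;
+          return "f$name";
+      }
+      # mangleOne("example.txt") => "fexample.txt"
+      # mangleOne("/home/craig") => "f%2fhome%2fcraig" (one tree level)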
+
+* Various changes to the CGI interface, BackupPC_Admin:
+
+ + Added button that allows users to manually start a full dump in
+ addition to the existing incremental dump.
+
+ + Added display of file attributes when browsing backups.
+
+ + Added an optional holdoff time specified by the user when canceling
+ a backup. BackupPC will not attempt any new backups for at least the
+ specified time. This holdoff time can be changed whether or not a
+ backup is running.
+
+  + Added support for file mangling, and correct merging of unfilled
+ backups from mangled or unmangled (and compressed or uncompressed)
+ fulls when browsing or restoring.
+
+ + Only displays a "Start Incr Backup" button if there are already some
+ backups.
+
+ + For DHCP hosts, when a user tries to manually start a backup, add
+ a check for the netbios name of both the host the request came
+ from (REMOTE_ADDR) and the last known DHCP address for that host
+ to see if either address matches the host. If not, an error
+    message is displayed. The previous behavior was that only requests
+ from the client itself succeeded, and requests from other machines
+ quietly failed.
+
+* Changed the version numbering to X.Y.Z, instead of X.0Y. This release
+ is 1.4.0. The first digit is for major new releases, the middle digit
+ is for significant feature releases and improvements, and the last
+ digit is for bug fixes. You should think of the old 1.00, 1.01, 1.02
+ and 1.03 as 1.0.0, ..., 1.3.0.
+
+* BackupPC and the CGI script BackupPC_Admin now check that the effective
+  user id is correct, to avoid accidentally launching BackupPC as the
+  wrong user and to detect CGI configuration problems. This behavior
+ can be turned off using the $Conf{BackupPCUserVerify} option.
+
+* In numerous places changed "Smb" to "Xfer" (eg: log file names) to
+ support generic names for both smb and tar transport methods. The
+ CGI script checks for old names for backward compatibility.
+
+* Major changes to BackupPC_dump to support the new tar transport. All
+  transport-specific code moved into BackupPC::Xfer::Smb and BackupPC::Xfer::Tar
+ objects.
+
+* Added workaround for a bug in Samba's smbclient for files between 2GB
+ and 4GB. The file size in the tar header is incorrect. This allows
+ files up to 4GB to work with smbclient, rather than 2GB. To support
+ files larger than 2GB you must make sure perl is compiled with the
+ uselargefiles option (use "perl -V | egrep largefiles" to check) and
+ the pool directory must be on a file system that supports large files.
+
+* Moved the pool writing code into a module BackupPC::PoolWrite. This
+ allows the clever file pool checking (digest, uncompressing, comparing
+ etc with minimum disk IO) to be used easily in multiple places (eg: it
+ is now used for writing attribute files so they can be pooled).
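+
+  A usage sketch; the constructor arguments are an assumption based on
+  the description above, and nextChunk() is a hypothetical reader:
+
+      use BackupPC::PoolWrite;
+
+      # Data is compared against candidate pool files as it streams in;
+      # a new file is written only if nothing in the pool matches.
+      my $pw = BackupPC::PoolWrite->new($bpc, $outFileName, $fileSize,
+                                        $Conf{CompressLevel});
+      while ( defined(my $data = nextChunk()) ) {
+          $pw->write(\$data);
+      }
+      $pw->close();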
+
+* Changed MD5 to Digest::MD5 to avoid use of the deprecated MD5 module.
+
+* Shortened default $Conf{MyPath} so that perl's taint mode is more likely
+ to be happy. The old $Conf{MyPath} contained /usr/local/bin, which
+ on one user's machine was world writable and perl -T correctly
+ complained about it.
+
+* Fixed ping command options in Lib.pm so that it works on OpenBSD.
+ Thanks to Kyle Amon for sending the fix. Decided to move the
+ ping options from Lib.pm into config.pl (as $Conf{PingArgs}) and
+ now configure.pl tries to come up with a sensible default based on
+ the OS.
+
+* Fixed argument checking in BackupPC_tarExtract to allow '$' in the
+ share name (eg: C$). Thanks to Jules Agee for this fix. Also
+ changed the default config.pl so that single quotes are used
+ everywhere so that people don't get tripped up putting '$' inside
+ double-quoted strings.
+
+#------------------------------------------------------------------------
+# Version 1.03, 9 Dec 2001
+#------------------------------------------------------------------------
+
+* BackupPC now has full support for compression. There are now two
+ pool areas, the original pool for uncompressed files, and cpool for
+ compressed files. The compression is done by Compress::Zlib.
+ Compression reduces the pool disk usage by around 40%, although your
+ mileage may vary. Compression is optional and can also be specified on
+ a per-PC basis (although this will cost more pool storage since many
+ backup files will have to be stored in both compressed and
+  uncompressed forms).
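+
+  In sketch form, the streaming Compress::Zlib usage looks like this
+  (settings illustrative):
+
+      use Compress::Zlib;
+
+      my $defl = deflateInit(-Level => $Conf{CompressLevel})
+                      or die "deflateInit failed";
+      my($out1, $status) = $defl->deflate($data);   # compress a chunk
+      my($out2)          = $defl->flush();          # remaining output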
+
+* A new script, BackupPC_compressPool, can be run to compress the entire
+ pool. This is used once to migrate all the pool data from uncompressed
+ to compressed on existing installations. Read the documentation
+ (Installing BackupPC/Compressing an existing pool) before running
+ BackupPC_compressPool!
+
+ Alternatively, compression can simply be turned on and all new backups
+ will be compressed. Both old (uncompressed) and new (compressed)
+ backups can be browsed and viewed. Eventually, the old backups will
+ expire and all the pool data will be compressed. However, until the
+ old backups expire, this approach could require 60% or more additional
+ pool storage space to store both uncompressed and compressed versions
+ of the backup files.
+
+* Significant improvements to the CGI interface, BackupPC_Admin:
+
+ - much better layout navigation
+ - handles compressed backup files and compressed log files
+ - handles unfilled incremental dumps
+ - better backup directory browsing navigation
+ - reports compression statistics
+ - $Conf{CgiDateFormatMMDD} allows you to set date format (MM/DD or DD/MM)
+ - Additional customization with $Conf{CgiHeaderFontType},
+ $Conf{CgiHeaderFontSize}, $Conf{CgiNavBarBgColor}, and
+ $Conf{CgiHeaderBgColor}.
+
+* Eliminated BackupPC_queueAll. BackupPC directly reads the hosts
+ file and queues the PCs itself. Like config.pl, BackupPC will
+ re-read the hosts file on each wakeup if its modification time
+ changes, or upon a SIGHUP. This also makes for better behavior
+  when adding hosts: simply send a SIGHUP to BackupPC or wait for
+  the next wakeup.
+
+* BackupPC_dump now compresses the SmbLOG file if compression is enabled.
+
+* BackupPC_dump keeps track of compressed file sizes so that compression
+  statistics can be reported by the CGI interface.
+
+* Aging of old log files now handles compressed log files (.z extension).
+
+* Added configuration option $Conf{IncrFill} to specify whether
+ incremental dumps should be filled in. Old behavior was that
+ filling was on. Now it's optional. See config.pl for more
+ details.
+
+* BackupPC_nightly now cleans and generates statistics for both
+ the uncompressed pool and compressed pool (cpool).
+
+* Added new utility script BackupPC_zcat that can be used to
+  uncompress BackupPC files.
+
+* configure.pl offers various options related to compression,
+ depending upon whether this is a new install or upgrade,
+ and whether or not Compress::Zlib is installed.
+
+* configure.pl now makes a backup copy of config.pl before
+ config.pl is updated.
+
+* added three new fields to the backups file to handle optional
+ filling and compression stats.
+
+* Added -e option to BackupPC_dump. BackupPC now invokes BackupPC_dump -e
+ on each dhcp host once each night to verify that very old backups are
+ expired. This ensures that very old backups are expired even if
+ the dhcp host has not been on the network for a long time.
+
+* fixed bug in BackupPC::FileZIO.pm that required Compress::Zlib,
+ even if compression was off. Thanks to Steve Holmes for reporting
+ this.
+
+* fixed bug that caused a BackupPC queue to get blocked when a backup
+ cancel attempt was made during the BackupPC_link phase.
+
+#------------------------------------------------------------------------
+# Version 1.02, 28 Oct 2001.
+#------------------------------------------------------------------------
+
+* Added new script BackupPC_tarExtract to extract the smbclient tar
+ archive. This reduces disk writes by perhaps 90-95% and disk reads by
+ 50%. Previously, tar was used to extract and write everything to disk.
+ Then BackupPC_dump would read enough of each file to compute the MD5
+ digest, and then compare the full file with candidate pool files. So
+ for each 1MB file that matches a single file in the pool, there would
+ be 1MB of disk writes and 2MB of disk reads (to compare two 1MB files).
+
+ BackupPC_tarExtract instead extracts the archive using a 1MB memory
+ buffer. This allows the MD5 digest to be computed without touching the
+ disk. Next, any potential pool file compares are done by comparing the
+ pool file against the incoming tar data in memory, which only requires
+ the pool file to be read. So for each 1MB file that matches a single
+ file in the pool, there are now no disk writes, and only 1MB of reads.
+ BackupPC_tarExtract handles arbitrary size files and repeated
+ potential pool matches. If the incoming file doesn't match the pool
+ then it is written to disk (once the pool is mature this happens maybe
+ 5-10% of the time).
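+
+  The core idea, in sketch form (the helpers are hypothetical, and the
+  real pool digest also folds in the file length):
+
+      use Digest::MD5;
+
+      # $buffer holds incoming tar file data (up to 1MB at a time).
+      sub matchOrWrite
+      {
+          my($buffer) = @_;
+          my $digest = Digest::MD5->new->add($buffer)->hexdigest;
+          for my $poolFile ( candidatePoolFiles($digest) ) {
+              return linkToPool($poolFile)     # match: no disk writes
+                  if ( sameContents($poolFile, \$buffer) );
+          }
+          writeNewFile(\$buffer);              # no match: write once
+      }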
+
+* Substantial changes to BackupPC_dump:
+
+ + BackupPC_tarExtract is now used in place of tar.
+
+ + BackupPC_dump now reads the output from both smbclient and
+ BackupPC_tarExtract and merges them into SmbLOG.
+
+ + Named pipes are no longer used to connect smbclient to tar
+ (now BackupPC_tarExtract). Regular pipes are used instead.
+ This avoids the need to system mknod or mkfifo.
+
+ + Locked files on the client that can't be read by smbclient
+ previously were filled with 0x0 bytes by smbclient, meaning
+ tar extracted a useless file filled with 0x0 bytes. Now,
+ BackupPC_dump watches the output of smbclient and removes
+ any files that smbclient couldn't read. This avoids storing
+ useless files. It tries to replace such files with a hard link
+ to a previous dump. These actions appear in the log file.
+
+* added new module lib/BackupPC/FileZIO.pm. This handles pool file
+  I/O and is used by BackupPC_tarExtract. BackupPC::FileZIO supports
+  reading and writing compressed and regular files and provides all the
+  hooks for compression support in BackupPC (to be supported in the next
+  version). BackupPC::FileZIO also does efficient writing of files that
+ contain leading 0x0 bytes (by seeking past the 0x0 bytes). This is
+ helpful when smbclient reads a locked file, and it fills the tar
+ output with a file of the correct size but all 0x0. Such files will be
+  later removed by BackupPC_dump. But in the meantime, BackupPC::FileZIO
+ writes such files efficiently (as sparse files), meaning just a few
+ blocks of disk space will be needed even if the file is large.
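+
+  A sketch of the sparse-write trick, assuming a handle opened for
+  writing: leading NUL runs become holes instead of data blocks.
+
+      use Fcntl qw(SEEK_CUR);
+
+      sub writeSparse
+      {
+          my($fh, $dataRef) = @_;
+          if ( $$dataRef =~ /^(\0+)/ ) {
+              sysseek($fh, length($1), SEEK_CUR);  # hole, no blocks
+              substr($$dataRef, 0, length($1)) = "";
+          }
+          syswrite($fh, $$dataRef) if ( length($$dataRef) );
+      }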
+
+* alive/dead counting for blackout now works correctly for DHCP hosts.
+
+* BackupPC resets activeJob on startup, to fix bug when BackupPC was
+ killed and restarted with backups running.
+
+* added an extra non-blocking select() in BackupPC to make sure the socket
+ reads don't block.
+
+* BackupPC avoids queuing multiple BackupPC_queueAll's on the CmdQueue.
+
+* Updated BackupPC_sendEmail to correctly parse the locked file
+ error from 2.2.1a smbclient, so that missing Outlook file emails
+ can be correctly sent.
+
+* Changed HostInfoRead() in lib/BackupPC/Lib.pm to lowercase the
+ hostname read from the hosts file.
+
+* BackupPC_Admin provides general summary when the host name is empty.
+
+* configure.pl (and BackupPC) now requires perl 5.6.0 or later.
+
+* configure.pl complains if BackupPC is already running, reminding you
+ to stop it before upgrading.
+
+* updated documentation, and fixed auto-insertion of config.pl into
+ BackupPC.pod (previously the last config parameter was left out of
+ BackupPC.pod).
+
+#------------------------------------------------------------------------
+# Version 1.01, 30 Sep 2001
+#------------------------------------------------------------------------
+
+* Documentation cleanup in README, doc/BackupPC.pod, conf/config.pl.
+
+* BackupPC_sendMail now reads the optional per-PC config file, allowing
+ email configuration parameters to be set on a per-PC basis.
+
+* Removed the unused 4096-length MD5 digest code in lib/BackupPC/Lib.pm.
+
+#------------------------------------------------------------------------
+# Version 1.00, 21 Sep 2001
+#------------------------------------------------------------------------
+
+* Initial release of BackupPC on sourceforge.net.
--- /dev/null
+GNU GENERAL PUBLIC LICENSE
+
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC: Main program for PC backups.
+#
+# DESCRIPTION
+#
+# BackupPC reads the configuration and status information from
+# $TopDir/conf. It then runs and manages all the backup activity.
+#
+# As specified by $Conf{WakeupSchedule}, BackupPC wakes up periodically
+# to queue backups on all the PCs. This is a four-step process:
+#   1) For each host and DHCP address, backup requests are queued on the
+#      background command queue.
+# 2) For each PC, BackupPC_dump is forked. Several of these may
+# be run in parallel, based on the configuration.
+# 3) For each complete, good, backup, BackupPC_link is forked.
+# Only one of these tasks runs at a time.
+# 4) In the background BackupPC_trashClean is run to remove any expired
+# backups. Once each night, BackupPC_nightly is run to complete some
+# additional administrative tasks (pool cleaning etc).
+#
+# BackupPC also listens for connections on a unix domain socket and
+# the tcp port $Conf{ServerPort}, which are used by various
+# sub-programs and the CGI script BackupPC_Admin for status reporting
+# and user-initiated backup or backup cancel requests.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use vars qw(%Status %Info $Hosts);
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+
+use File::Path;
+use Data::Dumper;
+use Getopt::Std;
+use Socket;
+use Carp;
+use Digest::MD5;
+
+###########################################################################
+# Handle command line options
+###########################################################################
+my %opts;
+getopts("d", \%opts);
+if ( @ARGV != 0 ) {
+ print("usage: $0 [-d]\n");
+ exit(1);
+}
+
+###########################################################################
+# Initialize major data structures and variables
+###########################################################################
+
+#
+# Get an instance of BackupPC::Lib and get some shortcuts.
+#
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+#
+# Verify we are running as the correct user
+#
+if ( $Conf{BackupPCUserVerify}
+ && $> != (my $uid = (getpwnam($Conf{BackupPCUser}))[2]) ) {
+ die "Wrong user: my userid is $>, instead of $uid ($Conf{BackupPCUser})\n";
+}
+
+#
+# %Status maintains status information about each host.
+# It is a hash of hashes, whose first index is the host.
+#
+%Status = ();
+
+#
+# %Info is a hash giving general information about BackupPC status.
+#
+%Info = ();
+
+#
+# Read old status
+#
+if ( -f "$TopDir/log/status.pl" && !(my $ret = do "$TopDir/log/status.pl") ) {
+ die "couldn't parse $TopDir/log/status.pl: $@" if $@;
+ die "couldn't do $TopDir/log/status.pl: $!" unless defined $ret;
+ die "couldn't run $TopDir/log/status.pl";
+}
+
+#
+# %Jobs maintains information about currently running jobs.
+# It is a hash of hashes, whose first index is the host.
+#
+my %Jobs = ();
+
+#
+# There are three command queues:
+# - @UserQueue is a queue of user initiated backup requests.
+# - @BgQueue is a queue of automatically scheduled backup requests.
+# - @CmdQueue is a queue of administrative jobs, including tasks
+#      like BackupPC_link, BackupPC_trashClean, and BackupPC_nightly.
+# Each queue is an array of hashes. Each hash stores information
+# about the command request.
+#
+my @UserQueue = ();
+my @CmdQueue = ();
+my @BgQueue = ();
+
+#
+# To quickly lookup if a given host is on a given queue, we keep
+# a hash of flags for each queue type.
+#
+my(%CmdQueueOn, %UserQueueOn, %BgQueueOn);
+
+#
+# One or more clients can connect to the server to get status information
+# or request/cancel backups etc. The %Clients hash maintains information
+# about each of these socket connections. The hash key is an incrementing
+# number stored in $ClientConnCnt. Each entry is a hash that contains
+# various information about the client connection.
+#
+my %Clients = ();
+my $ClientConnCnt;
+
+#
+# Read file descriptor mask used by select(). Every file descriptor
+# on which we expect to read (or accept) has the corresponding bit
+# set.
+#
+my $FDread = '';
+
+#
+# Unix seconds when we next wakeup. A value of zero forces the scheduler
+# to compute the next wakeup time.
+#
+my $NextWakeup = 0;
+
+#
+# Name of signal saved by catch_signal
+#
+my $SigName = "";
+
+#
+# Misc variables
+#
+my($RunNightlyWhenIdle, $FirstWakeup, $CmdJob, $ServerInetPort);
+
+#
+# Complete the rest of the initialization
+#
+Main_Initialize();
+
+###########################################################################
+# Main loop
+###########################################################################
+while ( 1 )
+{
+ #
+ # Check if we can/should run BackupPC_nightly
+ #
+ Main_TryToRun_nightly();
+
+ #
+ # Check if we can run a new command from @CmdQueue.
+ #
+ Main_TryToRun_CmdQueue();
+
+ #
+ # Check if we can run a new command from @UserQueue or @BgQueue.
+ #
+ Main_TryToRun_Bg_or_User_Queue();
+
+ #
+ # Do a select() to wait for the next interesting thing to happen
+ # (timeout, signal, someone sends a message, child dies etc).
+ #
+ my $fdRead = Main_Select();
+
+ #
+ # Process a signal if we received one.
+ #
+ if ( $SigName ) {
+ Main_Process_Signal();
+ $fdRead = undef;
+ }
+
+ #
+ # Check if a timeout has occurred.
+ #
+ Main_Check_Timeout();
+
+ #
+ # Check for, and process, any messages (output) from our jobs
+ #
+ Main_Check_Job_Messages($fdRead);
+
+ #
+ # Check for, and process, any output from our clients. Also checks
+ # for new connections to our SERVER_UNIX and SERVER_INET sockets.
+ #
+ Main_Check_Client_Messages($fdRead);
+}
+
+############################################################################
+# Main_Initialize()
+#
+# Main initialization routine. Called once at startup.
+############################################################################
+sub Main_Initialize
+{
+ umask($Conf{UmaskMode});
+
+ #
+ # Check for another running process, check that PASSWD is set and
+ # verify executables are configured correctly.
+ #
+ if ( $Info{pid} ne "" && kill(0, $Info{pid}) ) {
+ print(STDERR $bpc->timeStamp,
+ "Another BackupPC is running (pid $Info{pid}); quitting...\n");
+ exit(1);
+ }
+ foreach my $progName ( qw(SmbClientPath NmbLookupPath PingPath DfPath
+ SendmailPath) ) {
+ next if ( !defined($Conf{$progName}) || -x $Conf{$progName} );
+ print(STDERR $bpc->timeStamp,
+ "\$Conf{$progName} = '$Conf{$progName}' is not a"
+ . " valid executable program\n");
+ exit(1);
+ }
+
+ if ( $opts{d} ) {
+ #
+ # daemonize by forking
+ #
+ defined(my $pid = fork) or die "Can't fork: $!";
+ exit if $pid; # parent exits
+ }
+
+ #
+ # Open the LOG file and redirect STDOUT, STDERR etc
+ #
+ LogFileOpen();
+
+ #
+ # Read the hosts file (force a read).
+ #
+ exit(1) if ( !HostsUpdate(1) );
+
+ #
+ # Clean up %ENV for taint checking
+ #
+ delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};
+ $ENV{PATH} = $Conf{MyPath};
+
+ #
+ # Initialize server sockets
+ #
+ ServerSocketInit();
+
+ #
+ # Catch various signals
+ #
+ foreach my $sig ( qw(INT BUS SEGV PIPE TERM ALRM HUP) ) {
+ $SIG{$sig} = \&catch_signal;
+ }
+
+ #
+ # Report that we started, and update %Info.
+ #
+ print(LOG $bpc->timeStamp, "BackupPC started, pid $$\n");
+ $Info{ConfigModTime} = $bpc->ConfigMTime();
+ $Info{pid} = $$;
+ $Info{startTime} = time;
+ $Info{Version} = $bpc->{Version};
+
+ #
+    # Update the status left over from the last time BackupPC ran.
+ # Requeue any pending links.
+ #
+ foreach my $host ( sort(keys(%$Hosts)) ) {
+ if ( $Status{$host}{state} eq "backup in progress" ) {
+ #
+ # should we restart it? skip it for now.
+ #
+ $Status{$host}{state} = "idle";
+ } elsif ( $Status{$host}{state} eq "link pending"
+ || $Status{$host}{state} eq "link running" ) {
+ QueueLink($host);
+ } else {
+ $Status{$host}{state} = "idle";
+ }
+ $Status{$host}{activeJob} = 0;
+ }
+
+ #
+ # Write out our initial status and save our PID
+ #
+ StatusWrite();
+ if ( open(PID, ">$TopDir/log/BackupPC.pid") ) {
+ print(PID $$);
+ close(PID);
+ }
+
+ #
+ # For unknown reasons there is a very infrequent error about not
+ # being able to coerce GLOBs inside the XS Data::Dumper. I've
+ # only seen this on a particular platform and perl version.
+    # For now the workaround appears to be to use the pure-perl
+    # version of Data::Dumper instead of the XS one.
+ #
+ $Data::Dumper::Useqq = 1;
+}
+
+############################################################################
+# Main_TryToRun_nightly()
+#
+# Checks to see if we can/should run BackupPC_nightly or
+# BackupPC_trashClean. If so we push the appropriate command onto
+# @CmdQueue.
+############################################################################
+sub Main_TryToRun_nightly
+{
+ #
+ # Check if we should run BackupPC_nightly or BackupPC_trashClean.
+ # BackupPC_nightly is run when the current job queue is empty.
+ # BackupPC_trashClean is run in the background always.
+ #
+ my $trashCleanRunning = defined($Jobs{$bpc->trashJob}) ? 1 : 0;
+ if ( !$trashCleanRunning && !$CmdQueueOn{$bpc->trashJob} ) {
+ #
+ # This should only happen once at startup, but just in case this
+ # code will re-start BackupPC_trashClean if it quits
+ #
+ unshift(@CmdQueue, {
+ host => $bpc->trashJob,
+ user => "BackupPC",
+ reqTime => time,
+ cmd => "$BinDir/BackupPC_trashClean"
+ });
+ $CmdQueueOn{$bpc->trashJob} = 1;
+ }
+ if ( keys(%Jobs) == $trashCleanRunning && $RunNightlyWhenIdle == 1 ) {
+ push(@CmdQueue, {
+ host => $bpc->adminJob,
+ user => "BackupPC",
+ reqTime => time,
+ cmd => "$BinDir/BackupPC_nightly"
+ });
+ $CmdQueueOn{$bpc->adminJob} = 1;
+ $RunNightlyWhenIdle = 2;
+ }
+}
+
+############################################################################
+# Main_TryToRun_CmdQueue()
+#
+# Decide if we can run a new command from the @CmdQueue.
+# We only run one of these at a time. The @CmdQueue is
+# used to run BackupPC_link (for the corresponding host),
+# BackupPC_trashClean, and BackupPC_nightly using a fake
+# host name of $bpc->adminJob.
+############################################################################
+sub Main_TryToRun_CmdQueue
+{
+ my($req, $host);
+ if ( $CmdJob eq "" && @CmdQueue > 0 && $RunNightlyWhenIdle != 1 ) {
+ local(*FH);
+ $req = pop(@CmdQueue);
+
+ $host = $req->{host};
+ if ( defined($Jobs{$host}) ) {
+ print(LOG $bpc->timeStamp,
+ "Botch on admin job for $host: already in use!!\n");
+ #
+            # This could happen during normal operation: a user could
+ # request a backup while a BackupPC_link is queued from
+ # a previous backup. But it is unlikely. Just put this
+ # request back on the end of the queue.
+ #
+ unshift(@CmdQueue, $req);
+ return;
+ }
+ $CmdQueueOn{$host} = 0;
+ my $cmd = $req->{cmd};
+ my $pid = open(FH, "-|");
+ if ( !defined($pid) ) {
+ print(LOG $bpc->timeStamp,
+ "can't fork for $host, request by $req->{user}\n");
+ close(FH);
+            return;
+ }
+ if ( !$pid ) {
+ setpgrp 0,0;
+ exec($cmd);
+ print(LOG $bpc->timeStamp, "can't exec $cmd for $host\n");
+ exit(0);
+ }
+ $Jobs{$host}{pid} = $pid;
+ $Jobs{$host}{fh} = *FH;
+ $Jobs{$host}{fn} = fileno(FH);
+ vec($FDread, $Jobs{$host}{fn}, 1) = 1;
+ $Jobs{$host}{startTime} = time;
+ $Jobs{$host}{reqTime} = $req->{reqTime};
+ $Jobs{$host}{cmd} = $cmd;
+ $Jobs{$host}{type} = $Status{$host}{type};
+ $Status{$host}{state} = "link running";
+ $Status{$host}{activeJob} = 1;
+ $Status{$host}{endTime} = time;
+ $CmdJob = $host if ( $host ne $bpc->trashJob );
+ $cmd =~ s/$BinDir\///g;
+ print(LOG $bpc->timeStamp, "Running $cmd (pid=$pid)\n");
+ }
+}
+
+############################################################################
+# Main_TryToRun_Bg_or_User_Queue()
+#
+# Decide if we can run any new backup requests from @BgQueue
+# or @UserQueue. Several of these can be run at the same time
+# based on %Conf settings. Jobs from @UserQueue take priority,
+# and a total of $Conf{MaxBackups} + $Conf{MaxUserBackups}
+# simultaneous jobs can run from @UserQueue. After @UserQueue
+# is exhausted, up to $Conf{MaxBackups} simultaneous jobs can
+# run from @BgQueue.
+############################################################################
+sub Main_TryToRun_Bg_or_User_Queue
+{
+ my($req, $host);
+ while ( $RunNightlyWhenIdle == 0 ) {
+ local(*FH);
+ my(@args, @deferUserQueue, @deferBgQueue, $progName, $type);
+ my $nJobs = keys(%Jobs);
+ #
+ # CmdJob and trashClean don't count towards MaxBackups / MaxUserBackups
+ #
+ $nJobs-- if ( $CmdJob ne "" );
+ $nJobs-- if ( defined($Jobs{$bpc->trashJob} ) );
+ if ( $nJobs < $Conf{MaxBackups} + $Conf{MaxUserBackups}
+ && @UserQueue > 0 ) {
+ $req = pop(@UserQueue);
+ if ( defined($Jobs{$req->{host}}) ) {
+ push(@deferUserQueue, $req);
+ next;
+ }
+ push(@args, $req->{doFull} ? "-f" : "-i")
+ if ( !$req->{restore} );
+ $UserQueueOn{$req->{host}} = 0;
+ } elsif ( $nJobs < $Conf{MaxBackups}
+ && (@CmdQueue + $nJobs)
+ <= $Conf{MaxBackups} + $Conf{MaxPendingCmds}
+ && @BgQueue > 0 ) {
+ my $du;
+ if ( time - $Info{DUlastValueTime} >= 60 ) {
+ #
+ # Update our notion of disk usage no more than
+ # once every minute
+ #
+ $du = $bpc->CheckFileSystemUsage($TopDir);
+ $Info{DUlastValue} = $du;
+ $Info{DUlastValueTime} = time;
+ } else {
+ #
+ # if we recently checked it then just use the old value
+ #
+ $du = $Info{DUlastValue};
+ }
+ if ( $Info{DUDailyMaxReset} ) {
+ $Info{DUDailyMaxStartTime} = time;
+ $Info{DUDailyMaxReset} = 0;
+ $Info{DUDailyMax} = 0;
+ }
+ if ( $du > $Info{DUDailyMax} ) {
+ $Info{DUDailyMax} = $du;
+ $Info{DUDailyMaxTime} = time;
+ }
+ if ( $du > $Conf{DfMaxUsagePct} ) {
+ my $nSkip = @BgQueue + @deferBgQueue;
+ print(LOG $bpc->timeStamp,
+ "Disk too full ($du%%); skipping $nSkip hosts\n");
+ $Info{DUDailySkipHostCnt} += $nSkip;
+ @BgQueue = ();
+ @deferBgQueue = ();
+ %BgQueueOn = ();
+ next;
+ }
+ $req = pop(@BgQueue);
+ if ( defined($Jobs{$req->{host}}) ) {
+ push(@deferBgQueue, $req);
+ next;
+ }
+ $BgQueueOn{$req->{host}} = 0;
+ } else {
+ while ( @deferBgQueue ) {
+ push(@BgQueue, pop(@deferBgQueue));
+ }
+ while ( @deferUserQueue ) {
+ push(@UserQueue, pop(@deferUserQueue));
+ }
+ last;
+ }
+ $host = $req->{host};
+ my $user = $req->{user};
+ if ( $req->{restore} ) {
+ $progName = "BackupPC_restore";
+ $type = "restore";
+ push(@args, $req->{hostIP}, $req->{host}, $req->{reqFileName});
+ } else {
+ $progName = "BackupPC_dump";
+ $type = "backup";
+ push(@args, "-d") if ( $req->{dhcp} );
+ push(@args, "-e") if ( $req->{dumpExpire} );
+ push(@args, $host);
+ }
+ my $pid = open(FH, "-|");
+ if ( !defined($pid) ) {
+ print(LOG $bpc->timeStamp,
+ "can't fork to run $progName for $host, request by $user\n");
+ close(FH);
+ next;
+ }
+ if ( !$pid ) {
+ setpgrp 0,0;
+ exec("$BinDir/$progName", @args);
+ print(LOG $bpc->timeStamp, "can't exec $progName for $host\n");
+ exit(0);
+ }
+ $Jobs{$host}{pid} = $pid;
+ $Jobs{$host}{fh} = *FH;
+ $Jobs{$host}{fn} = fileno(FH);
+ $Jobs{$host}{dhcp} = $req->{dhcp};
+ vec($FDread, $Jobs{$host}{fn}, 1) = 1;
+ $Jobs{$host}{startTime} = time;
+ $Jobs{$host}{reqTime} = $req->{reqTime};
+ $Jobs{$host}{cmd} = "$progName " . join(" ", @args);
+ $Jobs{$host}{user} = $user;
+ $Jobs{$host}{type} = $type;
+ if ( !$req->{dhcp} ) {
+ $Status{$host}{state} = "$type starting";
+ $Status{$host}{activeJob} = 1;
+ $Status{$host}{startTime} = time;
+ $Status{$host}{endTime} = "";
+ }
+ }
+}
+
+############################################################################
+# Main_Select()
+#
+# If necessary, figure out when to next wakeup based on $Conf{WakeupSchedule},
+# and then do a select() to wait for the next thing to happen
+# (timeout, signal, someone sends a message, child dies etc).
+############################################################################
+sub Main_Select
+{
+ if ( $NextWakeup <= 0 ) {
+ #
+ # Figure out when to next wakeup based on $Conf{WakeupSchedule}.
+ #
+ my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)
+ = localtime(time);
+ my($currHours) = $hour + $min / 60 + $sec / 3600;
+ if ( $bpc->ConfigMTime() != $Info{ConfigModTime} ) {
+ my($mesg) = $bpc->ConfigRead()
+ || "Re-read config file because mtime changed";
+ print(LOG $bpc->timeStamp, "$mesg\n");
+ %Conf = $bpc->Conf();
+ $Info{ConfigModTime} = $bpc->ConfigMTime();
+ umask($Conf{UmaskMode});
+ ServerSocketInit();
+ }
+ my $delta = -1;
+ foreach my $t ( @{$Conf{WakeupSchedule} || [0..23]} ) {
+ next if ( $t < 0 || $t > 24 );
+ my $tomorrow = $t + 24;
+ if ( $delta < 0
+ || ($tomorrow - $currHours > 0
+ && $delta > $tomorrow - $currHours) ) {
+ $delta = $tomorrow - $currHours;
+ $FirstWakeup = $t == $Conf{WakeupSchedule}[0];
+ }
+ if ( $delta < 0
+ || ($t - $currHours > 0 && $delta > $t - $currHours) ) {
+ $delta = $t - $currHours;
+ $FirstWakeup = $t == $Conf{WakeupSchedule}[0];
+ }
+ }
+ $NextWakeup = time + $delta * 3600;
+ $Info{nextWakeup} = $NextWakeup;
+ print(LOG $bpc->timeStamp, "Next wakeup is ",
+ $bpc->timeStamp($NextWakeup, 1), "\n");
+ }
+ #
+ # Call select(), waiting until either a signal, a timeout,
+ # any output from our jobs, or any messages from clients
+ # connected via tcp.
+ # select() is where we (hopefully) spend most of our time blocked...
+ #
+ my $timeout = $NextWakeup - time;
+ $timeout = 1 if ( $timeout <= 0 );
+ my $ein = $FDread;
+ select(my $rout = $FDread, undef, $ein, $timeout);
+
+ return $rout;
+}
+
+############################################################################
+# Main_Process_Signal()
+#
+# Signal handler.
+############################################################################
+sub Main_Process_Signal
+{
+ #
+ # Process signals
+ #
+ if ( $SigName eq "HUP" ) {
+ my($mesg) = $bpc->ConfigRead()
+ || "Re-read config file because of a SIG_HUP";
+ print(LOG $bpc->timeStamp, "$mesg\n");
+ $Info{ConfigModTime} = $bpc->ConfigMTime();
+ %Conf = $bpc->Conf();
+ umask($Conf{UmaskMode});
+ ServerSocketInit();
+ HostsUpdate(0);
+ $NextWakeup = 0;
+ } elsif ( $SigName ) {
+ print(LOG $bpc->timeStamp, "Got signal $SigName... cleaning up\n");
+ foreach my $host ( keys(%Jobs) ) {
+ kill(2, $Jobs{$host}{pid});
+ }
+ StatusWrite();
+ unlink("$TopDir/log/BackupPC.pid");
+ exit(1);
+ }
+ $SigName = "";
+}
+
+############################################################################
+# Main_Check_Timeout()
+#
+# Check if a timeout has occurred, and if so, queue all the PCs for backups.
+# Also does log file aging on the first timeout after midnight.
+############################################################################
+sub Main_Check_Timeout
+{
+ #
+ # Process timeouts
+ #
+ return if ( time < $NextWakeup || $NextWakeup <= 0 );
+ $NextWakeup = 0;
+ if ( $FirstWakeup ) {
+ #
+ # This is the first wakeup after midnight. Do log file aging
+ # and various house keeping.
+ #
+ $FirstWakeup = 0;
+ printf(LOG "%s24hr disk usage: %d%% max, %d%% recent,"
+ . " %d skipped hosts\n",
+ $bpc->timeStamp, $Info{DUDailyMax}, $Info{DUlastValue},
+ $Info{DUDailySkipHostCnt});
+ $Info{DUDailyMaxReset} = 1;
+ $Info{DUDailyMaxPrev} = $Info{DUDailyMax};
+ $Info{DUDailySkipHostCntPrev} = $Info{DUDailySkipHostCnt};
+ $Info{DUDailySkipHostCnt} = 0;
+ my $lastLog = $Conf{MaxOldLogFiles} - 1;
+ if ( -f "$TopDir/log/LOG.$lastLog" ) {
+ print(LOG $bpc->timeStamp,
+ "Removing $TopDir/log/LOG.$lastLog\n");
+ unlink("$TopDir/log/LOG.$lastLog");
+ }
+ if ( -f "$TopDir/log/LOG.$lastLog.z" ) {
+ print(LOG $bpc->timeStamp,
+ "Removing $TopDir/log/LOG.$lastLog.z\n");
+ unlink("$TopDir/log/LOG.$lastLog.z");
+ }
+ print(LOG $bpc->timeStamp, "Aging LOG files, LOG -> LOG.0 -> "
+ . "LOG.1 -> ... -> LOG.$lastLog\n");
+ close(LOG);
+ for ( my $i = $lastLog - 1 ; $i >= 0 ; $i-- ) {
+ my $j = $i + 1;
+ rename("$TopDir/log/LOG.$i", "$TopDir/log/LOG.$j")
+ if ( -f "$TopDir/log/LOG.$i" );
+ rename("$TopDir/log/LOG.$i.z", "$TopDir/log/LOG.$j.z")
+ if ( -f "$TopDir/log/LOG.$i.z" );
+ }
+ #
+ # Compress the log file LOG -> LOG.0.z (if enabled).
+ # Otherwise, just rename LOG -> LOG.0.
+ #
+ BackupPC::FileZIO->compressCopy("$TopDir/log/LOG",
+ "$TopDir/log/LOG.0.z",
+ "$TopDir/log/LOG.0",
+ $Conf{CompressLevel}, 1);
+ LogFileOpen();
+ #
+ # Remember to run nightly script after current jobs are done
+ #
+ $RunNightlyWhenIdle = 1;
+ }
+ #
+ # Write out the current status and then queue all the PCs
+ #
+ HostsUpdate(0);
+ StatusWrite();
+ %BgQueueOn = () if ( @BgQueue == 0 );
+ %UserQueueOn = () if ( @UserQueue == 0 );
+ %CmdQueueOn = () if ( @CmdQueue == 0 );
+ QueueAllPCs();
+}
+
+############################################################################
+# Main_Check_Job_Messages($fdRead)
+#
+# Check if select() says we have bytes waiting from any of our jobs.
+# Handle each of the messages when complete (newline terminated).
+############################################################################
+sub Main_Check_Job_Messages
+{
+ my($fdRead) = @_;
+ foreach my $host ( keys(%Jobs) ) {
+ next if ( !vec($fdRead, $Jobs{$host}{fn}, 1) );
+ my $mesg;
+ #
+ # do a last check to make sure there is something to read so
+ # we are absolutely sure we won't block.
+ #
+ vec(my $readMask, $Jobs{$host}{fn}, 1) = 1;
+ if ( !select($readMask, undef, undef, 0.0) ) {
+ print(LOG $bpc->timeStamp, "Botch in Main_Check_Job_Messages:"
+ . " nothing to read from $host. Debug dump:\n");
+ my($dump) = Data::Dumper->new(
+ [ \%Clients, \%Jobs, \$FDread, \$fdRead],
+                    [qw(*Clients *Jobs *FDread *fdRead)]);
+ $dump->Indent(1);
+ print(LOG $dump->Dump);
+ next;
+ }
+ my $nbytes = sysread($Jobs{$host}{fh}, $mesg, 1024);
+ $Jobs{$host}{mesg} .= $mesg if ( $nbytes > 0 );
+ #
+        # Process any complete lines of output from this job.
+ # Any output to STDOUT or STDERR from the children is processed here.
+ #
+ while ( $Jobs{$host}{mesg} =~ /(.*?)[\n\r]+(.*)/s ) {
+ $mesg = $1;
+ $Jobs{$host}{mesg} = $2;
+ if ( $Jobs{$host}{dhcp} ) {
+ if ( $mesg =~ /^DHCP (\S+) (\S+)/ ) {
+ my $newHost = $2;
+ if ( defined($Jobs{$newHost}) ) {
+ print(LOG $bpc->timeStamp,
+ "Backup on $newHost is already running\n");
+ kill(2, $Jobs{$host}{pid});
+ $nbytes = 0;
+ last;
+ }
+ $Jobs{$host}{dhcpHostIP} = $host;
+ $Status{$newHost}{dhcpHostIP} = $host;
+ $Jobs{$newHost} = $Jobs{$host};
+ delete($Jobs{$host});
+ $host = $newHost;
+ $Status{$host}{state} = "backup starting";
+ $Status{$host}{activeJob} = 1;
+ $Status{$host}{startTime} = $Jobs{$host}{startTime};
+ $Status{$host}{endTime} = "";
+ $Jobs{$host}{dhcp} = 0;
+ } else {
+ print(LOG $bpc->timeStamp, "dhcp $host: $mesg\n");
+ }
+ } elsif ( $mesg =~ /^started (.*) dump, pid=(\d+), tarPid=(-?\d+)/ ) {
+ $Jobs{$host}{type} = $1;
+ $Jobs{$host}{xferPid} = $2;
+ $Jobs{$host}{tarPid} = $3;
+ print(LOG $bpc->timeStamp,
+ "Started $1 backup on $host"
+ . " (pid=$Jobs{$host}{pid}, xferPid=$2",
+ $Jobs{$host}{tarPid} > 0
+ ? ", tarPid=$Jobs{$host}{tarPid}" : "",
+ $Jobs{$host}{dhcpHostIP}
+ ? ", dhcp=$Jobs{$host}{dhcpHostIP}" : "",
+ ")\n");
+ $Status{$host}{state} = "backup in progress";
+ $Status{$host}{reason} = "";
+ $Status{$host}{type} = $1;
+ $Status{$host}{startTime} = time;
+ $Status{$host}{deadCnt} = 0;
+ $Status{$host}{aliveCnt}++;
+ $Status{$host}{dhcpCheckCnt}--
+ if ( $Status{$host}{dhcpCheckCnt} > 0 );
+ } elsif ( $mesg =~ /^started_restore (\S+) (\S+)/ ) {
+ $Jobs{$host}{type} = "restore";
+ $Jobs{$host}{xferPid} = $1;
+ $Jobs{$host}{tarPid} = $2;
+ print(LOG $bpc->timeStamp,
+ "Started restore on $host"
+                        . " (pid=$Jobs{$host}{pid}, xferPid=$Jobs{$host}{xferPid}",
+ $Jobs{$host}{tarPid} > 0
+ ? ", tarPid=$Jobs{$host}{tarPid}" : "",
+ ")\n");
+ $Status{$host}{state} = "restore in progress";
+ $Status{$host}{reason} = "";
+ $Status{$host}{type} = "restore";
+ $Status{$host}{startTime} = time;
+ $Status{$host}{deadCnt} = 0;
+ $Status{$host}{aliveCnt}++;
+ } elsif ( $mesg =~ /^(full|incr) backup complete/ ) {
+ print(LOG $bpc->timeStamp, "Finished $1 backup on $host\n");
+ $Status{$host}{reason} = "backup done";
+ delete($Status{$host}{error});
+ delete($Status{$host}{errorTime});
+ $Status{$host}{endTime} = time;
+ } elsif ( $mesg =~ /^restore complete/ ) {
+ print(LOG $bpc->timeStamp, "Finished restore on $host\n");
+ $Status{$host}{reason} = "restore done";
+ delete($Status{$host}{error});
+ delete($Status{$host}{errorTime});
+ $Status{$host}{endTime} = time;
+ } elsif ( $mesg =~ /^nothing to do/ ) {
+ $Status{$host}{state} = "idle";
+ $Status{$host}{reason} = "nothing to do";
+ $Status{$host}{startTime} = time;
+ $Status{$host}{dhcpCheckCnt}--
+ if ( $Status{$host}{dhcpCheckCnt} > 0 );
+ } elsif ( $mesg =~ /^no ping response/
+ || $mesg =~ /^ping too slow/ ) {
+ $Status{$host}{state} = "idle";
+ if ( $Status{$host}{reason} ne "backup failed" ) {
+ $Status{$host}{reason} = "no ping";
+ $Status{$host}{startTime} = time;
+ }
+ $Status{$host}{deadCnt}++;
+ if ( $Status{$host}{deadCnt} >= $Conf{BlackoutBadPingLimit} ) {
+ $Status{$host}{aliveCnt} = 0;
+ }
+ } elsif ( $mesg =~ /^dump failed: (.*)/ ) {
+ $Status{$host}{state} = "idle";
+ $Status{$host}{reason} = "backup failed";
+ $Status{$host}{error} = $1;
+ $Status{$host}{errorTime} = time;
+ $Status{$host}{endTime} = time;
+ print(LOG $bpc->timeStamp, "backup failed on $host ($1)\n");
+ } elsif ( $mesg =~ /^log\s+(.*)/ ) {
+ print(LOG $bpc->timeStamp, "$1\n");
+ } elsif ( $mesg =~ /^BackupPC_stats = (.*)/ ) {
+ my @f = split(/,/, $1);
+ $Info{"$f[0]FileCnt"} = $f[1];
+ $Info{"$f[0]DirCnt"} = $f[2];
+ $Info{"$f[0]Kb"} = $f[3];
+ $Info{"$f[0]Kb2"} = $f[4];
+ $Info{"$f[0]KbRm"} = $f[5];
+ $Info{"$f[0]FileCntRm"} = $f[6];
+ $Info{"$f[0]FileCntRep"} = $f[7];
+ $Info{"$f[0]FileRepMax"} = $f[8];
+ $Info{"$f[0]FileCntRename"} = $f[9];
+ $Info{"$f[0]Time"} = time;
+ printf(LOG "%s%s nightly clean removed %d files of"
+ . " size %.2fGB\n",
+ $bpc->timeStamp, ucfirst($f[0]),
+ $Info{"$f[0]FileCntRm"},
+ $Info{"$f[0]KbRm"} / (1000 * 1024));
+ printf(LOG "%s%s is %.2fGB, %d files (%d repeated, "
+ . "%d max chain), %d directories\n",
+ $bpc->timeStamp, ucfirst($f[0]),
+ $Info{"$f[0]Kb"} / (1000 * 1024),
+ $Info{"$f[0]FileCnt"}, $Info{"$f[0]FileCntRep"},
+ $Info{"$f[0]FileRepMax"}, $Info{"$f[0]DirCnt"});
+ } elsif ( $mesg =~ /^BackupPC_nightly lock_off/ ) {
+ $RunNightlyWhenIdle = 0;
+ } elsif ( $mesg =~ /^processState\s+(.+)/ ) {
+ $Jobs{$host}{processState} = $1;
+ } elsif ( $mesg =~ /^link\s+(.+)/ ) {
+ my($h) = $1;
+ $Status{$h}{needLink} = 1;
+ } else {
+ print(LOG $bpc->timeStamp, "$host: $mesg\n");
+ }
+ }
+ #
+        # shut down the job connection if we read EOF
+ #
+ if ( $nbytes <= 0 ) {
+ close($Jobs{$host}{fh});
+ vec($FDread, $Jobs{$host}{fn}, 1) = 0;
+ if ( $CmdJob eq $host ) {
+ my $cmd = $Jobs{$host}{cmd};
+ $cmd =~ s/$BinDir\///g;
+ print(LOG $bpc->timeStamp, "Finished $host ($cmd)\n");
+ $Status{$host}{state} = "idle";
+ $Status{$host}{endTime} = time;
+ $CmdJob = "";
+ $RunNightlyWhenIdle = 0 if ( $cmd eq "BackupPC_nightly"
+ && $RunNightlyWhenIdle );
+ } else {
+ #
+ # Queue BackupPC_link to complete the backup
+ # processing for this host.
+ #
+ if ( defined($Status{$host})
+ && ($Status{$host}{reason} eq "backup done"
+ || $Status{$host}{needLink}) ) {
+ QueueLink($host);
+ } elsif ( defined($Status{$host}) ) {
+ $Status{$host}{state} = "idle";
+ }
+ }
+ delete($Jobs{$host});
+ $Status{$host}{activeJob} = 0 if ( defined($Status{$host}) );
+ }
+ }
+ #
+ # When we are idle (empty Jobs, CmdQueue, BgQueue, UserQueue) we
+ # do a pass over %Status updating the deadCnt and aliveCnt for
+    # DHCP hosts.  We have to defer this until now because we can't
+    # be sure whether a DHCP host is alive or dead until we have swept
+    # the entire DHCP pool.
+ #
+ return if ( @CmdQueue || @BgQueue || @UserQueue || keys(%Jobs) > 1 );
+ foreach my $host ( keys(%Status) ) {
+ next if ( $Status{$host}{dhcpCheckCnt} <= 0 );
+ $Status{$host}{deadCnt} += $Status{$host}{dhcpCheckCnt};
+ $Status{$host}{dhcpCheckCnt} = 0;
+ if ( $Status{$host}{deadCnt} >= $Conf{BlackoutBadPingLimit} ) {
+ $Status{$host}{aliveCnt} = 0;
+ }
+ }
+}
+
+############################################################################
+# Main_Check_Client_Messages($fdRead)
+#
+# Check for, and process, any output from our clients. Also checks
+# for new connections to our SERVER_UNIX and SERVER_INET sockets.
+############################################################################
+sub Main_Check_Client_Messages
+{
+ my($fdRead) = @_;
+ foreach my $client ( keys(%Clients) ) {
+ next if ( !vec($fdRead, $Clients{$client}{fn}, 1) );
+ my($mesg, $host);
+ #
+ # do a last check to make sure there is something to read so
+ # we are absolutely sure we won't block.
+ #
+ vec(my $readMask, $Clients{$client}{fn}, 1) = 1;
+ if ( !select($readMask, undef, undef, 0.0) ) {
+ print(LOG $bpc->timeStamp, "Botch in Main_Check_Client_Messages:"
+ . " nothing to read from $client. Debug dump:\n");
+ my($dump) = Data::Dumper->new(
+ [ \%Clients, \%Jobs, \$FDread, \$fdRead],
+                        [qw(*Clients *Jobs *FDread *fdRead)]);
+ $dump->Indent(1);
+ print(LOG $dump->Dump);
+ next;
+ }
+ my $nbytes = sysread($Clients{$client}{fh}, $mesg, 1024);
+ $Clients{$client}{mesg} .= $mesg if ( $nbytes > 0 );
+ #
+ # Process any complete lines received from this client.
+ #
+ while ( $Clients{$client}{mesg} =~ /(.*?)[\n\r]+(.*)/s ) {
+ my($reply);
+ my $cmd = $1;
+ $Clients{$client}{mesg} = $2;
+ #
+ # Authenticate the message by checking the MD5 digest
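+            # Each line has the form "<digest> <mesg>": <digest> is the
+            # 22-character base64 MD5 of the per-connection seed, the
+            # message sequence number, $Conf{ServerMesgSecret} and the
+            # message body, so a replayed or altered message fails the
+            # check and the connection is shut down.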
+ #
+ my $md5 = Digest::MD5->new;
+ if ( $cmd !~ /^(.{22}) (.*)/
+ || ($md5->add($Clients{$client}{seed}
+ . $Clients{$client}{mesgCnt}
+ . $Conf{ServerMesgSecret} . $2),
+ $md5->b64digest ne $1) ) {
+ print(LOG $bpc->timeStamp, "Corrupted message '$cmd' from"
+ . " client '$Clients{$client}{clientName}':"
+ . " shutting down client connection\n");
+ $nbytes = 0;
+ last;
+ }
+ $Clients{$client}{mesgCnt}++;
+ $cmd = $2;
+ if ( $cmd =~ /^stop (\S+)\s+(\S+)\s+(\S*)/ ) {
+ $host = $1;
+ my $user = $2;
+ my $backoff = $3;
+ if ( $CmdJob ne $host && defined($Status{$host})
+ && defined($Jobs{$host}) ) {
+ print(LOG $bpc->timeStamp,
+ "Stopping current backup of $host,"
+ . " request by $user (backoff=$backoff)\n");
+ kill(2, $Jobs{$host}{pid});
+ vec($FDread, $Jobs{$host}{fn}, 1) = 0;
+ close($Jobs{$host}{fh});
+ delete($Jobs{$host});
+ $Status{$host}{state} = "idle";
+ $Status{$host}{reason} = "backup canceled by $user";
+ $Status{$host}{activeJob} = 0;
+ $Status{$host}{startTime} = time;
+ $reply = "ok: backup of $host cancelled";
+ } elsif ( $BgQueueOn{$host} || $UserQueueOn{$host} ) {
+ print(LOG $bpc->timeStamp,
+ "Stopping pending backup of $host,"
+ . " request by $user (backoff=$backoff)\n");
+ @BgQueue = grep($_->{host} ne $host, @BgQueue);
+ @UserQueue = grep($_->{host} ne $host, @UserQueue);
+ $BgQueueOn{$host} = $UserQueueOn{$host} = 0;
+ $reply = "ok: pending backup of $host cancelled";
+ } else {
+ print(LOG $bpc->timeStamp,
+ "Nothing to do for stop backup of $host,"
+ . " request by $user (backoff=$backoff)\n");
+ $reply = "ok: no backup was pending or running";
+ }
+ if ( defined($Status{$host}) && $backoff ne "" ) {
+ if ( $backoff > 0 ) {
+ $Status{$host}{backoffTime} = time + $backoff * 3600;
+ } else {
+ delete($Status{$host}{backoffTime});
+ }
+ }
+ } elsif ( $cmd =~ /^backup all$/ ) {
+ QueueAllPCs();
+ } elsif ( $cmd =~ /^backup (\S+)\s+(\S+)\s+(\S+)\s+(\S+)/ ) {
+ my $hostIP = $1;
+ $host = $2;
+ my $user = $3;
+ my $doFull = $4;
+ if ( !defined($Status{$host}) ) {
+ print(LOG $bpc->timeStamp,
+ "User $user requested backup of unknown host"
+ . " $host\n");
+ $reply = "error: unknown host $host";
+ } elsif ( defined($Jobs{$host})
+ && $Jobs{$host}{type} ne "restore" ) {
+ print(LOG $bpc->timeStamp,
+ "User $user requested backup of $host,"
+ . " but one is currently running\n");
+ $reply = "error: backup of $host is already running";
+ } else {
+ print(LOG $bpc->timeStamp,
+ "User $user requested backup of $host"
+ . " ($hostIP)\n");
+ if ( $BgQueueOn{$hostIP} ) {
+ @BgQueue = grep($_->{host} ne $hostIP, @BgQueue);
+ $BgQueueOn{$hostIP} = 0;
+ }
+ if ( $UserQueueOn{$hostIP} ) {
+ @UserQueue = grep($_->{host} ne $hostIP, @UserQueue);
+ $UserQueueOn{$hostIP} = 0;
+ }
+ unshift(@UserQueue, {
+ host => $hostIP,
+ user => $user,
+ reqTime => time,
+ doFull => $doFull,
+ dhcp => $hostIP eq $host ? 0 : 1,
+ });
+ $UserQueueOn{$hostIP} = 1;
+ $reply = "ok: requested backup of $host";
+ }
+ } elsif ( $cmd =~ /^restore (\S+)\s+(\S+)\s+(\S+)\s+(\S+)/ ) {
+ my $hostIP = $1;
+ $host = $2;
+ my $user = $3;
+ my $reqFileName = $4;
+ if ( !defined($Status{$host}) ) {
+ print(LOG $bpc->timeStamp,
+ "User $user requested restore to unknown host"
+                    . " $host\n");
+ $reply = "restore error: unknown host $host";
+ } else {
+ print(LOG $bpc->timeStamp,
+ "User $user requested restore to $host"
+ . " ($hostIP)\n");
+ unshift(@UserQueue, {
+ host => $host,
+ hostIP => $hostIP,
+ reqFileName => $reqFileName,
+ reqTime => time,
+ dhcp => 0,
+ restore => 1,
+ });
+ $UserQueueOn{$host} = 1;
+ if ( defined($Jobs{$host}) ) {
+ $reply = "ok: requested restore of $host, but a"
+ . " job is currently running,"
+ . " so this request will start later";
+ } else {
+ $reply = "ok: requested restore of $host";
+ }
+ }
+ } elsif ( $cmd =~ /^status\s*(.*)/ ) {
+ my($args) = $1;
+ my($dump, @values, @names);
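+                #
+                # $args is a space-separated list of requests, e.g.
+                # "info hosts" or "host(somepc) queueLen".  Each request
+                # appends a value and a Data::Dumper name; the combined
+                # dump is returned to the client as the reply.
+                #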
+ foreach my $type ( split(/\s+/, $args) ) {
+ if ( $type =~ /^queues/ ) {
+ push(@values, \@BgQueue, \@UserQueue, \@CmdQueue);
+ push(@names, qw(*BgQueue *UserQueue *CmdQueue));
+ } elsif ( $type =~ /^jobs/ ) {
+ push(@values, \%Jobs);
+ push(@names, qw(*Jobs));
+ } elsif ( $type =~ /^queueLen/ ) {
+ push(@values, {
+ BgQueue => scalar(@BgQueue),
+ UserQueue => scalar(@UserQueue),
+ CmdQueue => scalar(@CmdQueue),
+ });
+ push(@names, qw(*QueueLen));
+ } elsif ( $type =~ /^info/ ) {
+ push(@values, \%Info);
+ push(@names, qw(*Info));
+ } elsif ( $type =~ /^hosts/ ) {
+ push(@values, \%Status);
+ push(@names, qw(*Status));
+ } elsif ( $type =~ /^host\((.*)\)/
+ && defined($Status{$1}) ) {
+ push(@values, {
+ %{$Status{$1}},
+ BgQueueOn => $BgQueueOn{$1},
+ UserQueueOn => $UserQueueOn{$1},
+ CmdQueueOn => $CmdQueueOn{$1},
+ });
+ push(@names, qw(*StatusHost));
+ } else {
+ print(LOG $bpc->timeStamp,
+ "Unknown status request $type\n");
+ }
+ }
+ $dump = Data::Dumper->new(\@values, \@names);
+ $dump->Indent(0);
+ $reply = $dump->Dump;
+ } elsif ( $cmd =~ /^link\s+(.+)/ ) {
+ my($host) = $1;
+ QueueLink($host);
+ } elsif ( $cmd =~ /^log\s+(.*)/ ) {
+ print(LOG $bpc->timeStamp, "$1\n");
+ } elsif ( $cmd =~ /^quit/ || $cmd =~ /^exit/ ) {
+ $nbytes = 0;
+ last;
+ } else {
+ print(LOG $bpc->timeStamp, "Unknown command $cmd\n");
+ $reply = "error: bad command $cmd";
+ }
+ #
+ # send a reply to the client, at a minimum "ok\n".
+ #
+ $reply = "ok" if ( $reply eq "" );
+ $reply .= "\n";
+ syswrite($Clients{$client}{fh}, $reply, length($reply));
+ }
+ #
+ # Detect possible denial-of-service attack from sending a huge line
+ # (ie: never terminated). 32K seems to be plenty big enough as
+ # a limit.
+ #
+ if ( length($Clients{$client}{mesg}) > 32 * 1024 ) {
+ print(LOG $bpc->timeStamp, "Line too long from client"
+ . " '$Clients{$client}{clientName}':"
+ . " shutting down client connection\n");
+ $nbytes = 0;
+ }
+ #
+ # Shut down the client connection if we read EOF
+ #
+ if ( $nbytes <= 0 ) {
+ close($Clients{$client}{fh});
+ vec($FDread, $Clients{$client}{fn}, 1) = 0;
+ delete($Clients{$client});
+ }
+ }
+ #
+ # Accept any new connections on each of our listen sockets
+ #
+ if ( vec($fdRead, fileno(SERVER_UNIX), 1) ) {
+ local(*CLIENT);
+ my $paddr = accept(CLIENT, SERVER_UNIX);
+ $ClientConnCnt++;
+ $Clients{$ClientConnCnt}{clientName} = "unix socket";
+ $Clients{$ClientConnCnt}{mesg} = "";
+ $Clients{$ClientConnCnt}{fh} = *CLIENT;
+ $Clients{$ClientConnCnt}{fn} = fileno(CLIENT);
+ vec($FDread, $Clients{$ClientConnCnt}{fn}, 1) = 1;
+ #
+ # Generate and send unique seed for MD5 digests to avoid
+ # replay attacks. See BackupPC::Lib::ServerMesg().
+ #
+ my $seed = time . ",$ClientConnCnt,$$,0\n";
+ $Clients{$ClientConnCnt}{seed} = $seed;
+ $Clients{$ClientConnCnt}{mesgCnt} = 0;
+ syswrite($Clients{$ClientConnCnt}{fh}, $seed, length($seed));
+ }
+ if ( $ServerInetPort > 0 && vec($fdRead, fileno(SERVER_INET), 1) ) {
+ local(*CLIENT);
+ my $paddr = accept(CLIENT, SERVER_INET);
+ my($port,$iaddr) = sockaddr_in($paddr);
+ my $name = gethostbyaddr($iaddr, AF_INET);
+ $ClientConnCnt++;
+ $Clients{$ClientConnCnt}{mesg} = "";
+ $Clients{$ClientConnCnt}{fh} = *CLIENT;
+ $Clients{$ClientConnCnt}{fn} = fileno(CLIENT);
+ $Clients{$ClientConnCnt}{clientName} = "$name:$port";
+ vec($FDread, $Clients{$ClientConnCnt}{fn}, 1) = 1;
+ #
+ # Generate and send unique seed for MD5 digests to avoid
+ # replay attacks. See BackupPC::Lib::ServerMesg().
+ #
+ my $seed = time . ",$ClientConnCnt,$$,$port\n";
+ $Clients{$ClientConnCnt}{seed} = $seed;
+ $Clients{$ClientConnCnt}{mesgCnt} = 0;
+ syswrite($Clients{$ClientConnCnt}{fh}, $seed, length($seed));
+ }
+}
+
+###########################################################################
+# Miscellaneous subroutines
+###########################################################################
+
+#
+# Write the current status to $TopDir/log/status.pl
+#
+sub StatusWrite
+{
+ my($dump) = Data::Dumper->new(
+ [ \%Info, \%Status],
+ [qw(*Info *Status)]);
+ $dump->Indent(1);
+ if ( open(STATUS, ">$TopDir/log/status.pl") ) {
+ print(STATUS $dump->Dump);
+ close(STATUS);
+ }
+}
+
+#
+# Queue all the hosts for backup. This means queuing all the fixed
+# ip hosts and all the dhcp address ranges.  We also queue the dhcp
+# hosts with a -e flag to check for expired dumps.
+#
+sub QueueAllPCs
+{
+ foreach my $host ( sort(keys(%$Hosts)) ) {
+ delete($Status{$host}{backoffTime})
+ if ( defined($Status{$host}{backoffTime})
+ && $Status{$host}{backoffTime} < time );
+ next if ( defined($Jobs{$host})
+ || $BgQueueOn{$host}
+ || $UserQueueOn{$host}
+ || $CmdQueueOn{$host} );
+ if ( $Hosts->{$host}{dhcp} ) {
+ $Status{$host}{dhcpCheckCnt}++;
+ if ( $RunNightlyWhenIdle ) {
+ #
+ # Once per night queue a check for DHCP hosts that just
+ # checks for expired dumps. We need to do this to handle
+ # the case when a DHCP host has not been on the network for
+ # a long time, and some of the old dumps need to be expired.
+ # Normally expiry checks are done by BackupPC_dump only
+                # after the DHCP host has been detected on the network.
+ #
+ unshift(@BgQueue,
+ {host => $host, user => "BackupPC", reqTime => time,
+ dhcp => 0, dumpExpire => 1});
+ $BgQueueOn{$host} = 1;
+ }
+ } else {
+ #
+ # this is a fixed ip host: queue it
+ #
+ unshift(@BgQueue,
+ {host => $host, user => "BackupPC", reqTime => time,
+ dhcp => $Hosts->{$host}{dhcp}});
+ $BgQueueOn{$host} = 1;
+ }
+ }
+ foreach my $dhcp ( @{$Conf{DHCPAddressRanges}} ) {
+ for ( my $i = $dhcp->{first} ; $i <= $dhcp->{last} ; $i++ ) {
+ my $ipAddr = "$dhcp->{ipAddrBase}.$i";
+ next if ( defined($Jobs{$ipAddr})
+ || $BgQueueOn{$ipAddr}
+ || $UserQueueOn{$ipAddr}
+ || $CmdQueueOn{$ipAddr} );
+ #
+ # this is a potential dhcp ip address (we don't know the
+ # host name yet): queue it
+ #
+ unshift(@BgQueue,
+ {host => $ipAddr, user => "BackupPC", reqTime => time,
+ dhcp => 1});
+ $BgQueueOn{$ipAddr} = 1;
+ }
+ }
+}
+
+#
+# Queue a BackupPC_link for the given host
+#
+sub QueueLink
+{
+ my($host) = @_;
+
+ return if ( $CmdQueueOn{$host} );
+ $Status{$host}{state} = "link pending";
+ $Status{$host}{needLink} = 0;
+ unshift(@CmdQueue, {
+ host => $host,
+ user => "BackupPC",
+ reqTime => time,
+ cmd => "$BinDir/BackupPC_link $host"
+ });
+ $CmdQueueOn{$host} = 1;
+}
+
+#
+# Read the hosts file, and update %Status if any hosts have been
+# added or deleted.  We also track the mtime so we only need to
+# re-read the hosts file when it changes.
+#
+# This function is called at startup, SIGHUP, and on each wakeup.
+# It returns 1 on success and undef on failure.
+#
+sub HostsUpdate
+{
+ my($force) = @_;
+ my $newHosts;
+ #
+ # Nothing to do if we already have the current hosts file
+ #
+ return 1 if ( !$force && defined($Hosts)
+ && $Info{HostsModTime} == $bpc->HostsMTime() );
+ if ( !defined($newHosts = $bpc->HostInfoRead()) ) {
+ print(LOG $bpc->timeStamp, "Can't read hosts file!\n");
+ return;
+ }
+ print(LOG $bpc->timeStamp, "Reading hosts file\n");
+ $Hosts = $newHosts;
+ $Info{HostsModTime} = $bpc->HostsMTime();
+ #
+ # Now update %Status in case any hosts have been added or deleted
+ #
+ foreach my $host ( sort(keys(%$Hosts)) ) {
+ next if ( defined($Status{$host}) );
+ $Status{$host}{state} = "idle";
+ print(LOG $bpc->timeStamp, "Added host $host to backup list\n");
+ }
+ foreach my $host ( sort(keys(%Status)) ) {
+ next if ( $host eq $bpc->trashJob
+ || $host eq $bpc->adminJob
+ || defined($Hosts->{$host})
+ || defined($Jobs{$host})
+ || $BgQueueOn{$host}
+ || $UserQueueOn{$host}
+ || $CmdQueueOn{$host} );
+ print(LOG $bpc->timeStamp, "Deleted host $host from backup list\n");
+ delete($Status{$host});
+ }
+ return 1;
+}
+
+#
+# Remember the signal name for later processing
+#
+sub catch_signal
+{
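+    #
+    # On the first signal just record the name; the main loop notices
+    # $SigName and shuts down in an orderly way.  A second signal while
+    # one is still pending means we are likely stuck (eg: inside the
+    # exit handler), so kill the jobs and quit immediately.
+    #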
+ if ( $SigName ) {
+ $SigName = shift;
+ foreach my $host ( keys(%Jobs) ) {
+ kill(2, $Jobs{$host}{pid});
+ }
+ #
+ # In case we are inside the exit handler, reopen the log file
+ #
+ close(LOG);
+ LogFileOpen();
+ print(LOG "Fatal error: unhandled signal $SigName\n");
+ unlink("$TopDir/log/BackupPC.pid");
+ confess("Got new signal $SigName... quitting\n");
+ }
+ $SigName = shift;
+}
+
+#
+# Open the log file and point STDOUT and STDERR there too
+#
+sub LogFileOpen
+{
+ mkpath("$TopDir/log", 0, 0777) if ( !-d "$TopDir/log" );
+ open(LOG, ">>$TopDir/log/LOG")
+ || die("Can't create LOG file $TopDir/log/LOG");
+ close(STDOUT);
+ close(STDERR);
+ open(STDOUT, ">&LOG");
+ open(STDERR, ">&LOG");
+ select(LOG); $| = 1;
+ select(STDERR); $| = 1;
+ select(STDOUT); $| = 1;
+}
+
+#
+# Initialize the unix-domain and internet-domain sockets that
+# we listen to for client connections (from the CGI script and
+# some of the BackupPC sub-programs).
+#
+sub ServerSocketInit
+{
+ if ( !defined(fileno(SERVER_UNIX)) ) {
+ #
+ # one-time only: initialize unix-domain socket
+ #
+ if ( !socket(SERVER_UNIX, PF_UNIX, SOCK_STREAM, 0) ) {
+ print(LOG $bpc->timeStamp, "unix socket() failed: $!\n");
+ exit(1);
+ }
+ my $sockFile = "$TopDir/log/BackupPC.sock";
+ unlink($sockFile);
+ if ( !bind(SERVER_UNIX, sockaddr_un($sockFile)) ) {
+ print(LOG $bpc->timeStamp, "unix bind() failed: $!\n");
+ exit(1);
+ }
+ if ( !listen(SERVER_UNIX, SOMAXCONN) ) {
+ print(LOG $bpc->timeStamp, "unix listen() failed: $!\n");
+ exit(1);
+ }
+ vec($FDread, fileno(SERVER_UNIX), 1) = 1;
+ }
+ return if ( $ServerInetPort == $Conf{ServerPort} );
+ if ( $ServerInetPort > 0 ) {
+ vec($FDread, fileno(SERVER_INET), 1) = 0;
+ close(SERVER_INET);
+ $ServerInetPort = -1;
+ }
+ if ( $Conf{ServerPort} > 0 ) {
+ #
+ # Setup a socket to listen on $Conf{ServerPort}
+ #
+ my $proto = getprotobyname('tcp');
+ if ( !socket(SERVER_INET, PF_INET, SOCK_STREAM, $proto) ) {
+ print(LOG $bpc->timeStamp, "inet socket() failed: $!\n");
+ exit(1);
+ }
+ if ( !setsockopt(SERVER_INET, SOL_SOCKET, SO_REUSEADDR, pack("l",1)) ) {
+ print(LOG $bpc->timeStamp, "setsockopt() failed: $!\n");
+ exit(1);
+ }
+ if ( !bind(SERVER_INET, sockaddr_in($Conf{ServerPort}, INADDR_ANY)) ) {
+ print(LOG $bpc->timeStamp, "inet bind() failed: $!\n");
+ exit(1);
+ }
+ if ( !listen(SERVER_INET, SOMAXCONN) ) {
+ print(LOG $bpc->timeStamp, "inet listen() failed: $!\n");
+ exit(1);
+ }
+ vec($FDread, fileno(SERVER_INET), 1) = 1;
+ $ServerInetPort = $Conf{ServerPort};
+ }
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_compressPool: Compress existing pool
+#
+# DESCRIPTION
+#
+# Usage: BackupPC_compressPool [-t] [-r] <host>
+#
+# Flags:
+# -t test mode: do everything except actually replace the pool files.
+# Useful for estimating total run time without making any real
+# changes.
+# -r read check: re-read the compressed file and compare it against
+# the original uncompressed file. Can only be used in test mode.
+# -c # number of children to fork. BackupPC_compressPool can take
+# a long time to run, so to speed things up it spawns four children,
+# each working on a different part of the pool. You can change
+# the number of children with the -c option.
+#
+# BackupPC_compressPool is used to convert an uncompressed pool to
+# a compressed pool. If BackupPC compression is enabled after
+# uncompressed backups already exist, BackupPC_compressPool can
+# be used to compress all the old uncompressed backups.
+#
+# It is important that BackupPC not run while BackupPC_compressPool
+# runs. Also, BackupPC_compressPool must run to completion before
+# BackupPC is restarted.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+
+use File::Find;
+use File::Path;
+use Compress::Zlib;
+use Getopt::Std;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+$bpc->ChildInit();
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+my $PoolDir = "$TopDir/pool";
+my $CPoolDir = "$TopDir/cpool";
+my $Compress = $Conf{CompressLevel};
+my %opts;
+my $SigName = "";
+
+#
+# Catch various signals
+#
+foreach my $sig ( qw(INT BUS SEGV PIPE TERM ALRM HUP) ) {
+ $SIG{$sig} = \&catch_signal;
+}
+
+$| = 1;
+
+my $CompMaxRead = 131072; # 128K
+my $CompMaxWrite = 6291456; # 6MB
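+#
+# Files are deflated in input chunks of up to $CompMaxWrite bytes; the
+# stream is flushed whenever a chunk compresses to less than
+# $CompMaxRead bytes, so a reader never has to inflate an unbounded
+# amount of data at once (see doCompress() below).
+#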
+
+if ( !getopts("trc:", \%opts) || @ARGV != 0 ) {
+ print("usage: $0 [-c nChild] [-r] [-t]\n");
+ exit(1);
+}
+my $TestMode = $opts{t};
+my $ReadCheck = $opts{r};
+my $nChild = $opts{c} || 4;
+if ( $ReadCheck && !$TestMode ) {
+ print(STDERR "$0: -r (read check) option must have -t (test)\n");
+ exit(1);
+}
+if ( $nChild < 1 || $nChild >= 16 ) {
+ print(STDERR "$0: number of children (-c option) must be from 1 to 16\n");
+ exit(1);
+}
+if ( !BackupPC::FileZIO->compOk ) {
+ print STDERR <<EOF;
+$0: Compress::Zlib is not installed. You need to install it
+before running this script.
+EOF
+ exit(1);
+}
+if ( $Compress <= 0 ) {
+ print STDERR <<EOF;
+$0: compression is not enabled.  \$Conf{CompressLevel} needs
+to be set to a value from 1 to 9. Please edit the config.pl file and
+re-start $0.
+EOF
+ exit(1);
+}
+
+my $Errors = 0;
+my $SubDirDone = 0;
+my $SubDirCnt = 0;
+my $SubDirCurr     = "";
+my $FileCnt = 0;
+my $FileOrigSz = 0;
+my $FileCompressSz = 0;
+
+my $err = $bpc->ServerConnect($Conf{ServerHost}, $Conf{ServerPort});
+if ( $err eq "" ) {
+ print <<EOF;
+BackupPC is running on $Conf{ServerHost}. You need to stop BackupPC
+before you can run BackupPC_compressPool.  Depending upon your installation,
+you could run "/etc/init.d/backuppc stop".
+EOF
+ exit(1);
+}
+
+umask($Conf{UmaskMode});
+
+sub cpoolFileName
+{
+ my($new) = @_;
+ if ( $new !~ m{/(\w/\w/\w)/(\w{32})(_\d+)?$} ) {
+ print("Error: Can't parse filename from $new\n");
+ $Errors++;
+ return;
+ }
+ my $dir = "$CPoolDir/$1";
+ $new = "$dir/$2";
+ mkpath($dir, 0, 0777) if ( !-d $dir );
+ return $new if ( !-f $new );
+ for ( my $i = 0 ; ; $i++ ) {
+ return "${new}_$i" if ( !-f "${new}_$i" );
+ }
+}
+
+sub doCompress
+{
+ my $file = ($File::Find::name =~ /(.*)/ && $1);
+ local(*FH, *OUT);
+ my(@s) = stat($file);
+ my($n, $dataIn, $dataOut, $flush, $copy);
+
+ if ( $SigName ) {
+ print("Child got signal $SigName; quitting\n");
+ reportStats();
+ exit(0);
+ }
+ return if ( !-f $file );
+ my $defl = deflateInit(
+ -Bufsize => 65536,
+ -Level => $Compress,
+ );
+ if ( !open(FH, $TestMode ? "<$file" : "+<$file") ) {
+ print("Error: Can't open $file for read/write\n");
+ $Errors++;
+ return;
+ }
+ while ( sysread(FH, $dataIn, $CompMaxWrite) > 0 ) {
+ $flush = 0;
+ $FileOrigSz += length($dataIn);
+ my $fragOut = $defl->deflate($dataIn);
+ if ( length($fragOut) < $CompMaxRead ) {
+ #
+ # Compression is too high: to avoid huge memory requirements
+ # on read we need to flush().
+ #
+ $fragOut .= $defl->flush();
+ $flush = 1;
+ $defl = deflateInit(
+ -Bufsize => 65536,
+ -Level => $Compress,
+ );
+ }
+ $dataOut .= $fragOut;
+ if ( !$copy && length($dataOut) > $CompMaxWrite ) {
+ if ( !open(OUT, "+>$file.__z") ) {
+ print("Error: Can't open $file.__z for write\n");
+ $Errors++;
+ close(FH);
+ return;
+ }
+ $copy = 1;
+ }
+ if ( $copy && $dataOut ne "" ) {
+ if ( syswrite(OUT, $dataOut) != length($dataOut) ) {
+ printf("Error: Can't write %d bytes to %s\n",
+ length($dataOut), "$file.__z");
+ $Errors++;
+ close(OUT);
+ close(FH);
+ unlink("$file.__z");
+ return;
+ }
+ $FileCompressSz += length($dataOut);
+ $dataOut = undef;
+ }
+ }
+ if ( !$flush ) {
+ $dataOut .= $defl->flush();
+ if ( $copy && $dataOut ne "" ) {
+ if ( syswrite(OUT, $dataOut) != length($dataOut) ) {
+ printf("Error: Can't write %d bytes to %s\n",
+ length($dataOut), "$file.__z");
+ $Errors++;
+ close(OUT);
+ close(FH);
+ unlink("$file.__z");
+ return;
+ }
+ $FileCompressSz += length($dataOut);
+ $dataOut = undef;
+ }
+ }
+ my $newFile = cpoolFileName($file);
+ if ( $TestMode ) {
+ close(FH);
+ if ( !open(FH, ">$newFile") ) {
+ print("Error: Can't open $newFile for write\n");
+ $Errors++;
+ close(FH);
+ unlink("$file.__z");
+ return;
+ }
+ }
+ if ( $copy ) {
+ if ( !sysseek(OUT, 0, 0) ) {
+ print("Error: Can't seek $file.__z to 0\n");
+ $Errors++;
+ }
+ if ( !sysseek(FH, 0, 0) ) {
+ print("Error: Can't seek $newFile to 0\n");
+ $Errors++;
+ }
+ while ( sysread(OUT, $dataIn, $CompMaxWrite) > 0 ) {
+ if ( syswrite(FH, $dataIn) != length($dataIn) ) {
+ printf("Error: Can't write %d bytes to %s\n",
+ length($dataIn), $file);
+ $Errors++;
+ }
+ }
+ if ( !truncate(FH, sysseek(OUT, 0, 1)) ) {
+ printf("Error: Can't truncate %s to %d\n",
+ $file, sysseek(OUT, 0, 1));
+ $Errors++;
+ }
+ close(OUT);
+ close(FH);
+ unlink("$file.__z");
+ } else {
+ if ( !sysseek(FH, 0, 0) ) {
+ print("Error: Can't seek $file to 0\n");
+ $Errors++;
+ }
+ if ( syswrite(FH, $dataOut) != length($dataOut) ) {
+ printf("Error: Can't write %d bytes to %s\n",
+ length($dataOut), $file);
+ $Errors++;
+ }
+ $FileCompressSz += length($dataOut);
+ if ( !truncate(FH, length($dataOut)) ) {
+ printf("Error: Can't truncate %s to %d\n", $file, length($dataOut));
+ $Errors++;
+ }
+ close(FH);
+ }
+ if ( $TestMode ) {
+ if ( $ReadCheck ) {
+ checkRead($file, $newFile);
+ }
+ unlink($newFile);
+ } else {
+ rename($file, $newFile);
+ my $atime = $s[8] =~ /(.*)/ && $1;
+ my $mtime = $s[9] =~ /(.*)/ && $1;
+ utime($atime, $mtime, $newFile);
+ }
+ (my $dir = $file) =~ s{/[^/]*$}{};
+ $FileCnt++;
+ if ( $SubDirCurr ne "" && $SubDirCurr ne $dir ) {
+ $SubDirDone++;
+ $SubDirCurr = $dir;
+ reportStats();
+ } elsif ( $SubDirCurr eq "" ) {
+ $SubDirCurr = $dir;
+ }
+}
+
+sub reportStats
+{
+ print("stats: $SubDirDone $SubDirCnt $FileCnt $FileOrigSz"
+ . " $FileCompressSz $Errors\n");
+}
+
+sub checkRead
+{
+ my($file, $cfile) = @_;
+ return if ( !-f $file || !-f $cfile );
+ my $f = BackupPC::FileZIO->open($cfile, 0, $Compress)
+ || die("can't open $cfile for read\n");
+ my($n, $nd, $r, $d, $d0);
+ local(*FH);
+
+ if ( !open(FH, $file) ) {
+ print("can't open $file for check\n");
+ $Errors++;
+ $f->close();
+ return;
+ }
+ #print("comparing $file to $cfile\n");
+ while ( 1 ) {
+ $n = 1 + int(rand($CompMaxRead) + rand(100));
+ $r = $f->read(\$d, $n);
+ sysread(FH, $d0, $n);
+ if ( $d ne $d0 ) {
+ print("Botch read data on $cfile\n");
+ }
+ last if ( length($d) == 0 );
+ }
+ if ( ($r = $f->read(\$d, 100)) != 0 || ($r = $f->read(\$d, 100)) != 0 ) {
+ printf("Botch at EOF on $cfile got $r (%d,%d)\n",
+ sysseek(FH, 0, 1), $n);
+ $Errors++;
+ }
+ $f->close;
+ close(FH);
+}
+
+sub checkReadLine
+{
+ my($file, $cfile) = @_;
+ return if ( !-f $file || !-f $cfile );
+ my $f = BackupPC::FileZIO->open($cfile, 0, $Compress)
+ || die("can't open $cfile for read\n");
+ my($n, $nd, $r, $d, $d0);
+ local(*FH);
+
+ if ( !open(FH, $file) ) {
+ print("can't open $file for check\n");
+ $Errors++;
+ $f->close();
+ return;
+ }
+ while ( 1 ) {
+ $d0 = <FH>;
+ $d = $f->readLine();
+ if ( $d ne $d0 ) {
+ print("Botch read data on $cfile\n");
+ }
+ last if ( length($d) == 0 );
+ }
+ if ( ($r = $f->read(\$d, 100)) != 0 || ($r = $f->read(\$d, 100)) != 0 ) {
+ printf("Botch at EOF on $cfile got $r (%d,%d)\n",
+ sysseek(FH, 0, 1), $n);
+ $Errors++;
+ }
+ $f->close;
+ close(FH);
+}
+
+sub catch_signal
+{
+ $SigName = shift;
+}
+
+sub compressHostFiles
+{
+ my($host) = @_;
+ my(@Files, @Backups, $fh, $data);
+ local(*FH);
+
+ if ( !defined($host) ) {
+ for ( my $i = 0 ; ; $i++ ) {
+ last if ( !-f "$TopDir/log/LOG.$i" );
+ push(@Files, "$TopDir/log/LOG.$i");
+ }
+ } else {
+ @Backups = $bpc->BackupInfoRead($host);
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ next if ( $Backups[$i]{compress} );
+ push(@Files, "$TopDir/pc/$host/SmbLOG.$Backups[$i]{num}");
+ push(@Files, "$TopDir/pc/$host/XferLOG.$Backups[$i]{num}");
+ }
+ push(@Files, "$TopDir/pc/$host/SmbLOG.bad");
+ push(@Files, "$TopDir/pc/$host/XferLOG.bad");
+ for ( my $i = 0 ; ; $i++ ) {
+ last if ( !-f "$TopDir/pc/$host/LOG.$i" );
+ push(@Files, "$TopDir/pc/$host/LOG.$i");
+ }
+ }
+ foreach my $file ( @Files ) {
+ if ( $SigName ) {
+ print("Child got signal $SigName; quitting\n");
+ reportStats();
+ exit(0);
+ }
+ next if ( !-f $file );
+ if ( !BackupPC::FileZIO->compressCopy($file, "$file.z", undef,
+ $Compress, !$TestMode) ) {
+ print("compressCopy($file, $file.z, $Compress, !$TestMode)"
+ . " failed\n");
+ $Errors++;
+ } elsif ( $TestMode ) {
+ checkReadLine($file, "$file.z") if ( $ReadCheck );
+ unlink("$file.z");
+ }
+ }
+}
+
+sub updateHostBackupInfo
+{
+ my($host) = @_;
+ if ( !$TestMode ) {
+ my @Backups = $bpc->BackupInfoRead($host);
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ $Backups[$i]{compress} = $Compress;
+ }
+ $bpc->BackupInfoWrite($host, @Backups);
+ }
+}
+
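+#
+# The pool is laid out as pool/x/y/z/<32-char digest>, where x/y/z are
+# hex digits, so handing each child a slice of the 16 top-level
+# directories (and of the host list) partitions the work cleanly.
+#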
+my @Dirs = split(//, "0123456789abcdef");
+my @Hosts = sort(keys(%{$bpc->HostInfoRead()}));
+my $FDread;
+my @Jobs;
+
+#
+# First make sure there are no existing compressed backups
+#
+my(%compHosts, $compCnt);
+for ( my $j = 0 ; $j < @Hosts ; $j++ ) {
+ my $host = $Hosts[$j];
+ my @Backups = $bpc->BackupInfoRead($host);
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ next if ( !$Backups[$i]{compress} );
+ $compHosts{$host}++;
+ $compCnt++;
+ }
+}
+if ( $compCnt ) {
+ my $compHostStr = join("\n + ", sort(keys(%compHosts)));
+ print STDERR <<EOF;
+BackupPC_compressPool: there are $compCnt compressed backups.
+BackupPC_compressPool can only be run when there are no existing
+compressed backups. The following hosts have compressed backups:
+
+ + $compHostStr
+
+If you really want to run BackupPC_compressPool you will need to remove
+all the existing compressed backups (and the compressed pool $CPoolDir).
+Think carefully before you do this. Otherwise, you can just let new
+compressed backups run and the old uncompressed backups and pool will
+steadily expire.
+EOF
+ exit(0);
+}
+
+#
+# Next spawn $nChild children that actually do all the work.
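+# Each child is created with open(CHILD, "-|"), which forks and connects
+# the child's STDOUT to a pipe the parent reads; the children report
+# progress as "stats: ..." lines that the parent parses below.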
+#
+for ( my $i = 0 ; $i < $nChild ; $i++ ) {
+ local(*CHILD);
+ my $pid;
+ if ( !defined($pid = open(CHILD, "-|")) ) {
+ print("Can't fork\n");
+ next;
+ }
+ my $nDirs = @Dirs / ($nChild - $i);
+ my $nHosts = @Hosts / ($nChild - $i);
+ if ( !$pid ) {
+ #
+ # This is the child.
+ # First process each of the hosts (compress per-pc log files etc).
+ #
+ for ( my $j = 0 ; $j < $nHosts ; $j++ ) {
+ compressHostFiles($Hosts[$j]);
+ }
+ #
+ # Count the total number of directories so we can estimate the
+ # completion time. We ignore empty directories by reading each
+ # directory and making sure it has at least 3 entries (ie, ".",
+ # ".." and a file).
+ #
+ for ( my $j = 0 ; $j < $nDirs ; $j++ ) {
+ my $thisDir = $Dirs[$j];
+ next if ( !-d "$PoolDir/$thisDir" );
+ foreach my $dir ( <$PoolDir/$thisDir/*/*> ) {
+ next if ( !opendir(DIR, $dir) );
+ my @files = readdir(DIR);
+ closedir(DIR);
+ $SubDirCnt++ if ( @files > 2 );
+ }
+ }
+ #
+ # Now process each of the directories
+ #
+ for ( my $j = 0 ; $j < $nDirs ; $j++ ) {
+ my $thisDir = shift(@Dirs);
+ next if ( !-d "$PoolDir/$thisDir" );
+ find({wanted => sub { doCompress($File::Find::name); },
+ no_chdir => 1}, "$PoolDir/$thisDir");
+ }
+ #
+ # Last, update the backup info file for each of the hosts
+ #
+ for ( my $j = 0 ; $j < $nHosts ; $j++ ) {
+ updateHostBackupInfo($Hosts[$j]);
+ }
+ $SubDirDone = $SubDirCnt;
+ reportStats();
+ exit(0);
+ }
+ #
+ # This is the parent. Peel off $nDirs directories, $nHosts hosts,
+ # and continue
+ #
+ $Jobs[$i]{fh} = *CHILD;
+ $Jobs[$i]{pid} = $pid;
+ vec($FDread, fileno($Jobs[$i]{fh}), 1) = 1;
+ splice(@Dirs, 0, $nDirs);
+ splice(@Hosts, 0, $nHosts);
+}
+
+#
+# compress the main log files (in the parents)
+#
+compressHostFiles(undef);
+
+#
+# Now wait for all the children to report results and finish up
+#
+my $TimeStart = time;
+my $DonePct = 0;
+my $GotSignal = "";
+while ( $FDread !~ /^\0*$/ ) {
+ my $ein = $FDread;
+ select(my $rout = $FDread, undef, $ein, undef);
+ if ( $SigName ne $GotSignal ) {
+ print("Got signal $SigName; waiting for $nChild children to cleanup\n");
+ $GotSignal = $SigName;
+ }
+ for ( my $i = 0 ; $i < $nChild ; $i++ ) {
+ next if ( !vec($rout, fileno($Jobs[$i]{fh}), 1) );
+ my $data;
+ if ( sysread($Jobs[$i]{fh}, $data, 1024) <= 0 ) {
+ vec($FDread, fileno($Jobs[$i]{fh}), 1) = 0;
+ close($Jobs[$i]{fh});
+ next;
+ }
+ $Jobs[$i]{mesg} .= $data;
+ while ( $Jobs[$i]{mesg} =~ /(.*?)[\n\r]+(.*)/s ) {
+ my $mesg = $1;
+ $Jobs[$i]{mesg} = $2;
+ if ( $mesg =~ /^stats: (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)/ ) {
+ $Jobs[$i]{SubDirDone} = $1;
+ $Jobs[$i]{SubDirCnt} = $2;
+ $Jobs[$i]{FileCnt} = $3;
+ $Jobs[$i]{FileOrigSz} = $4;
+ $Jobs[$i]{FileCompressSz} = $5;
+ $Jobs[$i]{Errors} = $6;
+ $SubDirDone = $SubDirCnt = $FileCnt = $FileOrigSz = 0;
+ $FileCompressSz = $Errors = 0;
+ my $numReports = 0;
+ for ( my $j = 0 ; $j < $nChild ; $j++ ) {
+ next if ( !defined($Jobs[$j]{SubDirDone}) );
+ $SubDirDone += $Jobs[$j]{SubDirDone};
+ $SubDirCnt += $Jobs[$j]{SubDirCnt};
+ $FileCnt += $Jobs[$j]{FileCnt};
+ $FileOrigSz += $Jobs[$j]{FileOrigSz};
+ $FileCompressSz += $Jobs[$j]{FileCompressSz};
+ $Errors += $Jobs[$j]{Errors};
+ $numReports++;
+ }
+ $SubDirCnt ||= 1;
+ $FileOrigSz ||= 1;
+ my $pctDone = 100 * $SubDirDone / $SubDirCnt;
+ if ( $numReports == $nChild && $pctDone >= $DonePct + 1 ) {
+ $DonePct = int($pctDone);
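+                    #
+                    # Estimate the time remaining as the elapsed time
+                    # scaled by the ratio of work left to work done,
+                    # padded by 20%.  Eg: 25% done after 1 hour gives
+                    # 1.2 * 1 * (100/25 - 1) = 3.6 hours to go.
+                    #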
+ my $estSecLeft = 1.2 * (time - $TimeStart)
+ * (100 / $pctDone - 1);
+ my $timeStamp = $bpc->timeStamp;
+ printf("%sDone %2.0f%% (%d of %d dirs, %d files,"
+ . " %.2fGB raw, %.1f%% reduce, %d errors)\n",
+ $timeStamp,
+ $pctDone, $SubDirDone, $SubDirCnt, $FileCnt,
+ $FileOrigSz / (1024 * 1024 * 1000),
+ 100 * (1 - $FileCompressSz / $FileOrigSz));
+ printf("%s Est complete in %.1f hours (around %s)\n",
+ $timeStamp, $estSecLeft / 3600,
+ $bpc->timeStamp(time + $estSecLeft, 1))
+ if ( $DonePct < 100 );
+ }
+ } else {
+ print($mesg, "\n");
+ }
+ }
+ }
+}
+if ( $Errors ) {
+ print("Finished with $Errors errors!!!!\n");
+ exit(1);
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_dump: Dump a single PC.
+#
+# DESCRIPTION
+#
+# Usage: BackupPC_dump [-i] [-f] [-d] [-e] <host>
+#
+# Flags:
+#
+# -i Do an incremental dump, overriding any scheduling (but a full
+# dump will be done if no dumps have yet succeeded)
+#
+# -f Do a full dump, overriding any scheduling.
+#
+# -d Host is a DHCP pool address, so initially we have no
+# idea which machine this actually is. BackupPC_dump
+# determines the actual PC host name by using the NetBios
+# name.
+#
+# -e Just do a dump expiry check for the host.  Don't do anything else.
+# This is used periodically by BackupPC to make sure that dhcp hosts
+# have correctly expired old backups. Without this, dhcp hosts that
+# are no longer on the network will not expire old backups.
+#
+# BackupPC_dump is run periodically by BackupPC to backup $host.
+# The file $TopDir/pc/$host/backups is read to decide whether a
+# full or incremental backup needs to be run. If no backup is
+# scheduled, or a ping to $host fails, then BackupPC_dump quits.
+#
+# The backup is done using smbclient, extracting the dump into
+# $TopDir/pc/$host/new. The smbclient output is put into
+# $TopDir/pc/$host/XferLOG.
+#
+# If the dump succeeds (based on parsing the output of smbclient):
+# - $TopDir/pc/$host/new is renamed to $TopDir/pc/$host/nnn, where
+# nnn is the next sequential dump number.
+# - $TopDir/pc/$host/XferLOG is renamed to $TopDir/pc/$host/XferLOG.nnn.
+# - $TopDir/pc/$host/backups is updated.
+#
+# If the dump fails:
+# - $TopDir/pc/$host/new is moved to $TopDir/trash for later removal.
+# - $TopDir/pc/$host/XferLOG is renamed to $TopDir/pc/$host/XferLOG.bad
+# for later viewing.
+#
+# BackupPC_dump communicates to BackupPC via printing to STDOUT.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+use BackupPC::Xfer::Smb;
+use BackupPC::Xfer::Tar;
+
+use File::Path;
+use Getopt::Std;
+
+###########################################################################
+# Initialize
+###########################################################################
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+$bpc->ChildInit();
+
+my %opts;
+getopts("defi", \%opts);
+if ( @ARGV != 1 ) {
+ print("usage: $0 [-d] [-e] [-f] [-i] <host>\n");
+ exit(1);
+}
+if ( $ARGV[0] !~ /^([\w\.-]+)$/ ) {
+ print("$0: bad host name '$ARGV[0]'\n");
+ exit(1);
+}
+my $hostIP = $1;
+my($host, $user);
+
+if ( $opts{d} ) {
+ #
+ # The host name $hostIP is simply a DHCP address. We need to check
+ # if there is any machine at this address, and if so, get the actual
+ # host name via NetBios using nmblookup.
+ #
+ exit(1) if ( $bpc->CheckHostAlive($hostIP) < 0 );
+ ($host, $user) = $bpc->NetBiosInfoGet($hostIP);
+ exit(1) if ( $host !~ /^([\w\.-]+)$/ );
+ my $hosts = $bpc->HostInfoRead($host);
+ exit(1) if ( !defined($hosts->{$host}) );
+} else {
+ $host = $hostIP;
+}
+
+my $Dir = "$TopDir/pc/$host";
+my $xferPid = -1;
+my $tarPid = -1;
+
+#
+# Re-read config file, so we can include the PC-specific config
+#
+$bpc->ConfigRead($host);
+%Conf = $bpc->Conf();
+
+#
+# Catch various signals
+#
+$SIG{INT} = \&catch_signal;
+$SIG{ALRM} = \&catch_signal;
+$SIG{TERM} = \&catch_signal;
+
+#
+# Make sure we eventually timeout if there is no activity from
+# the data transport program.
+#
+alarm($Conf{SmbClientTimeout});
+
+mkpath($Dir, 0, 0777) if ( !-d $Dir );
+if ( !-f "$Dir/LOCK" ) {
+ open(LOCK, ">$Dir/LOCK") && close(LOCK);
+}
+open(LOG, ">>$Dir/LOG");
+select(LOG); $| = 1; select(STDOUT);
+
+###########################################################################
+# Figure out what to do and do it
+###########################################################################
+
+#
+# For the -e option we just expire backups and quit
+#
+if ( $opts{e} ) {
+ BackupExpire($host);
+ exit(0);
+}
+
+#
+# See if we should skip this host because we are inside a blackout
+# window or a user-requested backoff period.
+#
+my $err = $bpc->ServerConnect($Conf{ServerHost}, $Conf{ServerPort});
+if ( $err ne "" ) {
+ print("Can't connect to server ($err)\n");
+ print(LOG $bpc->timeStamp, "Can't connect to server ($err)\n");
+ exit(1);
+}
+my $reply = $bpc->ServerMesg("status host($host)");
+$reply = $1 if ( $reply =~ /(.*)/s );
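+#
+# The reply is a Data::Dumper string that defines %StatusHost (see the
+# "status" command handling in the server); eval() it to recover this
+# host's status hash.
+#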
+my(%StatusHost);
+eval($reply);
+$bpc->ServerDisconnect();
+
+#
+# For DHCP tell BackupPC which host this is
+#
+if ( $opts{d} ) {
+ if ( $StatusHost{activeJob} ) {
+ # oops, something is already running for this host
+ exit(0);
+ }
+ print("DHCP $hostIP $host\n");
+}
+
+my($needLink, @Backups, $type);
+my $lastFull = 0;
+my $lastIncr = 0;
+
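+#
+# Blackout check: a host that has answered pings at least
+# $Conf{BlackoutGoodCnt} times in a row is assumed to be always on the
+# network, so scheduled backups (not ones forced with -i or -f) are
+# skipped between $Conf{BlackoutHourBegin} and $Conf{BlackoutHourEnd}
+# on the weekdays listed in $Conf{BlackoutWeekDays}.
+#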
+if ( !$opts{i} && !$opts{f} && $Conf{BlackoutGoodCnt} >= 0
+ && $StatusHost{aliveCnt} >= $Conf{BlackoutGoodCnt} ) {
+ my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+ my($currHours) = $hour + $min / 60 + $sec / 3600;
+ if ( $Conf{BlackoutHourBegin} <= $currHours
+ && $currHours <= $Conf{BlackoutHourEnd}
+ && grep($_ == $wday, @{$Conf{BlackoutWeekDays}}) ) {
+ print(LOG $bpc->timeStamp, "skipping because of blackout"
+ . " (alive $StatusHost{aliveCnt} times)\n");
+ print("nothing to do\n");
+ print("link $host\n") if ( $needLink );
+ exit(1);
+ }
+}
+
+if ( !$opts{i} && !$opts{f} && $StatusHost{backoffTime} > time ) {
+    printf(LOG "%sskipping because of user requested delay (%.1f hours left)\n",
+ $bpc->timeStamp, ($StatusHost{backoffTime} - time) / 3600);
+ print("nothing to do\n");
+ print("link $host\n") if ( $needLink );
+ exit(1);
+}
+
+#
+# Now see if there are any old backups we should delete
+#
+BackupExpire($host);
+
+#
+# Read Backup information, and find times of the most recent full and
+# incremental backups
+#
+@Backups = $bpc->BackupInfoRead($host);
+for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ $needLink = 1 if ( $Backups[$i]{nFilesNew} eq ""
+ || -f "$Dir/NewFileList.$Backups[$i]{num}" );
+ if ( $Backups[$i]{type} eq "full" ) {
+ $lastFull = $Backups[$i]{startTime}
+ if ( $lastFull < $Backups[$i]{startTime} );
+ } else {
+ $lastIncr = $Backups[$i]{startTime}
+ if ( $lastIncr < $Backups[$i]{startTime} );
+ }
+}
+
+#
+# Decide whether we do nothing, or a full or incremental backup.
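+# A full is forced by -f (or if there are no backups yet); otherwise it
+# is scheduled when the last full is older than $Conf{FullPeriod} days
+# and the last incremental is older than $Conf{IncrPeriod} days.
+# Failing that, an incremental is forced by -i or scheduled when the
+# most recent backup of either type is older than $Conf{IncrPeriod} days.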
+#
+if ( @Backups == 0
+ || $opts{f}
+ || (!$opts{i} && (time - $lastFull > $Conf{FullPeriod} * 24*3600
+ && time - $lastIncr > $Conf{IncrPeriod} * 24*3600)) ) {
+ $type = "full";
+} elsif ( $opts{i} || (time - $lastIncr > $Conf{IncrPeriod} * 24*3600
+ && time - $lastFull > $Conf{IncrPeriod} * 24*3600) ) {
+ $type = "incr";
+} else {
+ print("nothing to do\n");
+ print("link $host\n") if ( $needLink );
+ exit(0);
+}
+
+#
+# Check if $host is alive
+#
+my $delay = $bpc->CheckHostAlive($hostIP);
+if ( $delay < 0 ) {
+ print(LOG $bpc->timeStamp, "no ping response\n");
+ print("no ping response\n");
+ print("link $host\n") if ( $needLink );
+ exit(1);
+} elsif ( $delay > $Conf{PingMaxMsec} ) {
+ printf(LOG "%sping too slow: %.4gmsec\n", $bpc->timeStamp, $delay);
+ printf("ping too slow: %.4gmsec (threshold is %gmsec)\n",
+ $delay, $Conf{PingMaxMsec});
+ print("link $host\n") if ( $needLink );
+ exit(1);
+}
+
+#
+# Make sure it is really the machine we expect (only for fixed addresses,
+# since we got the DHCP address above).
+#
+if ( !$opts{d} && (my $errMsg = CorrectHostCheck($hostIP, $host)) ) {
+ print(LOG $bpc->timeStamp, "dump failed: $errMsg\n");
+ print("dump failed: $errMsg\n");
+ exit(1);
+} elsif ( $opts{d} ) {
+ print(LOG $bpc->timeStamp, "$host is dhcp $hostIP, user is $user\n");
+}
+
+#
+# Get a clean directory $Dir/new
+#
+$bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
+
+#
+# Setup file extension for compression and open XferLOG output file
+#
+$Conf{CompressLevel} = 0 if ( !BackupPC::FileZIO->compOk );
+my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+my $XferLOG = BackupPC::FileZIO->open("$Dir/XferLOG$fileExt", 1,
+ $Conf{CompressLevel});
+if ( !defined($XferLOG) ) {
+ print(LOG $bpc->timeStamp, "dump failed: unable to open/create"
+ . " $Dir/XferLOG$fileExt\n");
+ print("dump failed: unable to open/create $Dir/XferLOG$fileExt\n");
+ exit(1);
+}
+unlink("$Dir/NewFileList");
+my $startTime = time();
+
+my $tarErrs = 0;
+my $nFilesExist = 0;
+my $sizeExist = 0;
+my $sizeExistComp = 0;
+my $nFilesTotal = 0;
+my $sizeTotal = 0;
+my($logMsg, %stat, $xfer, $ShareNames);
+
+if ( $Conf{XferMethod} eq "tar" ) {
+ $ShareNames = $Conf{TarShareName};
+} else {
+ $ShareNames = $Conf{SmbShareName};
+}
+
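+#
+# $Conf{TarShareName} / $Conf{SmbShareName} may be a single share name
+# or a list of shares; normalize to an array ref so the loop below
+# treats both cases uniformly.
+#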
+$ShareNames = [ $ShareNames ] unless ref($ShareNames) eq "ARRAY";
+
+#
+# Now backup each of the shares
+#
+for my $shareName ( @$ShareNames ) {
+ local(*RH, *WH);
+
+ $stat{xferOK} = $stat{hostAbort} = undef;
+ $stat{hostError} = $stat{lastOutputLine} = undef;
+ if ( -d "$Dir/new/$shareName" ) {
+ print(LOG $bpc->timeStamp,
+ "unexpected repeated share name $shareName skipped\n");
+ next;
+ }
+
+ #
+ # Create a pipe to connect smbclient to BackupPC_tarExtract
+ # WH is the write handle for writing, provided to the transport
+ # program, and RH is the other end of the pipe for reading,
+ # provided to BackupPC_tarExtract.
+ #
+ pipe(RH, WH);
+
+ #
+ # fork a child for BackupPC_tarExtract. TAR is a file handle
+ # on which we (the parent) read the stdout & stderr from
+ # BackupPC_tarExtract.
+ #
+ if ( !defined($tarPid = open(TAR, "-|")) ) {
+ print(LOG $bpc->timeStamp, "can't fork to run tar\n");
+ print("can't fork to run tar\n");
+ close(RH);
+ close(WH);
+ last;
+ }
+ if ( !$tarPid ) {
+ #
+ # This is the tar child. Close the write end of the pipe,
+ # clone STDERR to STDOUT, clone STDIN from RH, and then
+ # exec BackupPC_tarExtract.
+ #
+ setpgrp 0,0;
+ close(WH);
+ close(STDERR);
+ open(STDERR, ">&STDOUT");
+ close(STDIN);
+ open(STDIN, "<&RH");
+ exec("$BinDir/BackupPC_tarExtract '$host' '$shareName'"
+ . " $Conf{CompressLevel}");
+ print(LOG $bpc->timeStamp, "can't exec $BinDir/BackupPC_tarExtract\n");
+ exit(0);
+ }
+
+ #
+ # Run the transport program
+ #
+ my $xferArgs = {
+ host => $host,
+ hostIP => $hostIP,
+ shareName => $shareName,
+ pipeRH => *RH,
+ pipeWH => *WH,
+ XferLOG => $XferLOG,
+ outDir => $Dir,
+ type => $type,
+ lastFull => $lastFull,
+ };
+ if ( $Conf{XferMethod} eq "tar" ) {
+ #
+ # Use tar (eg: tar/ssh) as the transport program.
+ #
+ $xfer = BackupPC::Xfer::Tar->new($bpc, $xferArgs);
+ } else {
+ #
+ # Default is to use smbclient (smb) as the transport program.
+ #
+ $xfer = BackupPC::Xfer::Smb->new($bpc, $xferArgs);
+ }
+ if ( !defined($logMsg = $xfer->start()) ) {
+ print(LOG $bpc->timeStamp, $xfer->errStr, "\n");
+ print($xfer->errStr, "\n");
+ print("link $host\n") if ( $needLink );
+ #
+ # kill off the tar process, first nicely then forcefully
+ #
+ kill(2, $tarPid);
+ sleep(1);
+ kill(9, $tarPid);
+ exit(1);
+ }
+ #
+ # The parent must close both handles on the pipe since the children
+ # are using these handles now.
+ #
+ close(RH);
+ close(WH);
+ $xferPid = $xfer->xferPid;
+ print(LOG $bpc->timeStamp, $logMsg,
+ " (xferPid=$xferPid, tarPid=$tarPid)\n");
+ print("started $type dump, pid=$xferPid, tarPid=$tarPid\n");
+
+ #
+ # Parse the output of the transfer program and BackupPC_tarExtract
+ # while they run. Since we are reading from two or more children
+ # we use a select.
+ #
+ my($FDread, $tarOut, $mesg);
+ vec($FDread, fileno(TAR), 1) = 1;
+ $xfer->setSelectMask(\$FDread);
+
+ SCAN: while ( 1 ) {
+ my $ein = $FDread;
+ last if ( $FDread =~ /^\0*$/ );
+ select(my $rout = $FDread, undef, $ein, undef);
+ if ( vec($rout, fileno(TAR), 1) ) {
+ if ( sysread(TAR, $mesg, 8192) <= 0 ) {
+ vec($FDread, fileno(TAR), 1) = 0;
+ close(TAR);
+ } else {
+ $tarOut .= $mesg;
+ }
+ }
+ while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
+ $_ = $1;
+ $tarOut = $2;
+ $XferLOG->write(\"tarExtract: $_\n");
+ if ( /^Done: (\d+) errors, (\d+) filesExist, (\d+) sizeExist, (\d+) sizeExistComp, (\d+) filesTotal, (\d+) sizeTotal/ ) {
+ $tarErrs = $1;
+ $nFilesExist = $2;
+ $sizeExist = $3;
+ $sizeExistComp = $4;
+ $nFilesTotal = $5;
+ $sizeTotal = $6;
+ }
+ }
+ last if ( !$xfer->readOutput(\$FDread, $rout) );
+ while ( my $str = $xfer->logMsgGet ) {
+ print(LOG $bpc->timeStamp, "xfer: $str\n");
+ }
+ if ( $xfer->getStats->{fileCnt} == 1 ) {
+ #
+ # Make sure it is still the machine we expect. We do this while
+ # the transfer is running to avoid a potential race condition if
+ # the ip address was reassigned by dhcp just before we started
+ # the transfer.
+ #
+ if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) {
+ $stat{hostError} = $errMsg;
+ last SCAN;
+ }
+ }
+ }
+ #
+ # Merge the xfer status (need to accumulate counts)
+ #
+ my $newStat = $xfer->getStats;
+ foreach my $k ( (keys(%stat), keys(%$newStat)) ) {
+ next if ( !defined($newStat->{$k}) );
+ if ( $k =~ /Cnt$/ ) {
+ $stat{$k} += $newStat->{$k};
+ delete($newStat->{$k});
+ next;
+ }
+ if ( !defined($stat{$k}) ) {
+ $stat{$k} = $newStat->{$k};
+ delete($newStat->{$k});
+ next;
+ }
+ }
+ $stat{xferOK} = 0 if ( $stat{hostError} || $stat{hostAbort} );
+ if ( !$stat{xferOK} ) {
+ #
+        # kill off the transfer program, first nicely then forcefully
+ #
+ kill(2, $xferPid);
+ sleep(1);
+ kill(9, $xferPid);
+ #
+ # kill off the tar process, first nicely then forcefully
+ #
+ kill(2, $tarPid);
+ sleep(1);
+ kill(9, $tarPid);
+ #
+ # don't do any more shares on this host
+ #
+ last;
+ }
+}
+$XferLOG->close();
+
+my $lastNum = -1;
+
+#
+# Do one last check to make sure it is still the machine we expect.
+#
+if ( $stat{xferOK} && (my $errMsg = CorrectHostCheck($hostIP, $host)) ) {
+ $stat{hostError} = $errMsg;
+ $stat{xferOK} = 0;
+}
+if ( $stat{xferOK} ) {
+ @Backups = $bpc->BackupInfoRead($host);
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ $lastNum = $Backups[$i]{num} if ( $lastNum < $Backups[$i]{num} );
+ }
+ $lastNum++;
+ $bpc->RmTreeDefer("$TopDir/trash", "$Dir/$lastNum")
+ if ( -d "$Dir/$lastNum" );
+ if ( !rename("$Dir/new", "$Dir/$lastNum") ) {
+ print(LOG $bpc->timeStamp,
+ "Rename $Dir/new -> $Dir/$lastNum failed\n");
+ $stat{xferOK} = 0;
+ }
+ rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.$lastNum$fileExt");
+ rename("$Dir/NewFileList", "$Dir/NewFileList.$lastNum");
+}
+my $endTime = time();
+
+#
+# If the dump failed, clean up
+#
+if ( !$stat{xferOK} ) {
+ #
+ # wait a short while and see if the system is still alive
+ #
+ $stat{hostError} = $stat{lastOutputLine} if ( $stat{hostError} eq "" );
+ if ( $stat{hostError} ) {
+ print(LOG $bpc->timeStamp,
+ "Got fatal error during xfer ($stat{hostError})\n");
+ }
+ sleep(10);
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ $stat{hostAbort} = 1;
+ }
+ if ( $stat{hostAbort} ) {
+ $stat{hostError} = "lost network connection during backup";
+ }
+ print(LOG $bpc->timeStamp, "Dump aborted ($stat{hostError})\n");
+ unlink("$Dir/timeStamp.level0");
+ unlink("$Dir/SmbLOG.bad");
+ unlink("$Dir/SmbLOG.bad$fileExt");
+ unlink("$Dir/XferLOG.bad");
+ unlink("$Dir/XferLOG.bad$fileExt");
+ unlink("$Dir/NewFileList");
+ rename("$Dir/XferLOG$fileExt", "$Dir/XferLOG.bad$fileExt");
+ $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
+ print("dump failed: $stat{hostError}\n");
+ print("link $host\n") if ( $needLink );
+ exit(1);
+}
+
+#
+# Add the new backup information to the backup file
+#
+@Backups = $bpc->BackupInfoRead($host);
+my $i = @Backups;
+$Backups[$i]{num} = $lastNum;
+$Backups[$i]{type} = $type;
+$Backups[$i]{startTime} = $startTime;
+$Backups[$i]{endTime} = $endTime;
+$Backups[$i]{size} = $sizeTotal;
+$Backups[$i]{nFiles} = $nFilesTotal;
+$Backups[$i]{xferErrs} = $stat{xferErrCnt} || 0;
+$Backups[$i]{xferBadFile} = $stat{xferBadFileCnt} || 0;
+$Backups[$i]{xferBadShare} = $stat{xferBadShareCnt} || 0;
+$Backups[$i]{nFilesExist} = $nFilesExist;
+$Backups[$i]{sizeExist} = $sizeExist;
+$Backups[$i]{sizeExistComp} = $sizeExistComp;
+$Backups[$i]{tarErrs} = $tarErrs;
+$Backups[$i]{compress} = $Conf{CompressLevel};
+$Backups[$i]{noFill} = $type eq "full" ? 0 : 1;
+$Backups[$i]{mangle} = 1; # name mangling always on for v1.04+
+$bpc->BackupInfoWrite($host, @Backups);
+
+unlink("$Dir/timeStamp.level0");
+
+#
+# Now remove the bad files, replacing them if possible with links to
+# earlier backups.
+#
+foreach my $file ( $xfer->getBadFiles ) {
+ my $j;
+ unlink("$Dir/$lastNum/$file");
+ for ( $j = $i - 1 ; $j >= 0 ; $j-- ) {
+ next if ( !-f "$Dir/$Backups[$j]{num}/$file" );
+ if ( !link("$Dir/$Backups[$j]{num}/$file", "$Dir/$lastNum/$file") ) {
+ print(LOG $bpc->timeStamp,
+ "Unable to link $lastNum/$file to"
+ . " $Backups[$j]{num}/$file\n");
+ } else {
+ print(LOG $bpc->timeStamp,
+ "Bad file $lastNum/$file replaced by link to"
+ . " $Backups[$j]{num}/$file\n");
+ }
+ last;
+ }
+ if ( $j < 0 ) {
+ print(LOG $bpc->timeStamp,
+ "Removed bad file $lastNum/$file (no older"
+ . " copy to link to)\n");
+ }
+}
+
+my $otherCount = $stat{xferErrCnt} - $stat{xferBadFileCnt}
+ - $stat{xferBadShareCnt};
+print(LOG $bpc->timeStamp,
+ "$type backup $lastNum complete, $stat{fileCnt} files,"
+ . " $stat{byteCnt} bytes,"
+ . " $stat{xferErrCnt} xferErrs ($stat{xferBadFileCnt} bad files,"
+ . " $stat{xferBadShareCnt} bad shares, $otherCount other)\n");
+
+BackupExpire($host);
+
+print("$type backup complete\n");
+
+###########################################################################
+# Subroutines
+###########################################################################
+
+sub catch_signal
+{
+ my $signame = shift;
+
+ print(LOG $bpc->timeStamp, "cleaning up after signal $signame\n");
+ if ( $xferPid > 0 ) {
+ if ( kill(2, $xferPid) <= 0 ) {
+ sleep(1);
+ kill(9, $xferPid);
+ }
+ }
+ if ( $tarPid > 0 ) {
+ if ( kill(2, $tarPid) <= 0 ) {
+ sleep(1);
+ kill(9, $tarPid);
+ }
+ }
+ unlink("$Dir/timeStamp.level0");
+ unlink("$Dir/NewFileList");
+ $bpc->RmTreeDefer("$TopDir/trash", "$Dir/new") if ( -d "$Dir/new" );
+ print("exiting after signal $signame\n");
+ print("link $host\n") if ( $needLink );
+ exit(1);
+}
+
+#
+# Decide which old backups should be expired by moving them
+# to $TopDir/trash.
+#
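+# For example (hypothetical settings, not from any shipped config.pl):
+# with $Conf{IncrKeepCnt} = 6, $Conf{IncrKeepCntMin} = 1 and
+# $Conf{IncrAgeMax} = 30, an incremental is removed once more than six
+# incrementals exist, or once more than one exists and the oldest is
+# over 30 days old, and only if removing it cannot break a later
+# unfilled backup that depends on it.
+#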
+sub BackupExpire
+{
+ my($host) = @_;
+ my($Dir) = "$TopDir/pc/$host";
+ my(@Backups) = $bpc->BackupInfoRead($host);
+ my($cntFull, $cntIncr, $firstFull, $firstIncr, $oldestIncr, $oldestFull, $i);
+
+ while ( 1 ) {
+ $cntFull = $cntIncr = 0;
+ $oldestIncr = $oldestFull = 0;
+ for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ if ( $Backups[$i]{type} eq "full" ) {
+ $firstFull = $i if ( $cntFull == 0 );
+ $cntFull++;
+ } else {
+ $firstIncr = $i if ( $cntIncr == 0 );
+ $cntIncr++;
+ }
+ }
+ $oldestIncr = (time - $Backups[$firstIncr]{startTime}) / (24 * 3600)
+ if ( $cntIncr > 0 );
+ $oldestFull = (time - $Backups[$firstFull]{startTime}) / (24 * 3600)
+ if ( $cntFull > 0 );
+ if ( ($cntIncr > $Conf{IncrKeepCnt}
+ || ($cntIncr > $Conf{IncrKeepCntMin}
+ && $oldestIncr > $Conf{IncrAgeMax}))
+ && (@Backups <= $firstIncr + 1
+ || $Backups[$firstIncr]{noFill}
+ || !$Backups[$firstIncr + 1]{noFill}) ) {
+ #
+ # Only delete an incr backup if the Conf settings are satisfied.
+ # We also must make sure that either this backup is the most
+ # recent one, or it is not filled, or the next backup is filled.
+ # (We can't delete a filled incr if the next backup is not
+ # filled.)
+ #
+ print(LOG $bpc->timeStamp,
+ "removing incr backup $Backups[$firstIncr]{num}\n");
+ $bpc->RmTreeDefer("$TopDir/trash",
+ "$Dir/$Backups[$firstIncr]{num}");
+ unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}")
+ if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}" );
+ unlink("$Dir/SmbLOG.$Backups[$firstIncr]{num}.z")
+ if ( -f "$Dir/SmbLOG.$Backups[$firstIncr]{num}.z" );
+ unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}")
+ if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}" );
+ unlink("$Dir/XferLOG.$Backups[$firstIncr]{num}.z")
+ if ( -f "$Dir/XferLOG.$Backups[$firstIncr]{num}.z" );
+ splice(@Backups, $firstIncr, 1);
+ } elsif ( ($cntFull > $Conf{FullKeepCnt}
+ || ($cntFull > $Conf{FullKeepCntMin}
+ && $oldestFull > $Conf{FullAgeMax}))
+ && (@Backups <= $firstFull + 1
+ || !$Backups[$firstFull + 1]{noFill}) ) {
+ #
+ # Only delete a full backup if the Conf settings are satisfied.
+ # We also must make sure that either this backup is the most
+ # recent one, or the next backup is filled.
+ # (We can't delete a full backup if the next backup is not
+ # filled.)
+ #
+ print(LOG $bpc->timeStamp,
+ "removing full backup $Backups[$firstFull]{num}\n");
+ $bpc->RmTreeDefer("$TopDir/trash",
+ "$Dir/$Backups[$firstFull]{num}");
+ unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}")
+ if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}" );
+ unlink("$Dir/SmbLOG.$Backups[$firstFull]{num}.z")
+ if ( -f "$Dir/SmbLOG.$Backups[$firstFull]{num}.z" );
+ unlink("$Dir/XferLOG.$Backups[$firstFull]{num}")
+ if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}" );
+ unlink("$Dir/XferLOG.$Backups[$firstFull]{num}.z")
+ if ( -f "$Dir/XferLOG.$Backups[$firstFull]{num}.z" );
+ splice(@Backups, $firstFull, 1);
+ } else {
+ last;
+ }
+ }
+ $bpc->BackupInfoWrite($host, @Backups);
+}
+
+sub CorrectHostCheck
+{
+ my($hostIP, $host) = @_;
+ return if ( $hostIP eq $host && !$Conf{FixedIPNetBiosNameCheck} );
+ my($netBiosHost, $netBiosUser) = $bpc->NetBiosInfoGet($hostIP);
+ return "host $host has mismatching netbios name $netBiosHost"
+ if ( $netBiosHost ne $host );
+ return;
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_link: link new backup into pool
+#
+# DESCRIPTION
+#
+# BackupPC_link inspects every file in a new backup and
+# checks if an existing file from any previous backup is
+# identical. If so, the file is removed and replaced by
+# a hardlink to the existing file. If the file is new,
+# a hardlink to the file is made in the pool area, so that
+# this file is available for checking against future backups.
+#
+# Then, for incremental backups, hardlinks are made in the
+# backup directories to all files that were not extracted during
+# the incremental backup. This means the incremental dump looks
+# like a complete image of the PC.
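+#
+# As a rough sketch of the pooling idea (FindIdenticalPoolFile and
+# NewPoolName are hypothetical helpers, shown only for illustration;
+# the real work is done by BackupPC::Lib::MakeFileLink using candidate
+# pool names derived from an MD5 digest of the file contents):
+#
+#     if ( my $poolFile = FindIdenticalPoolFile($digest, $fileName) ) {
+#         unlink($fileName);            # replace file with a hardlink
+#         link($poolFile, $fileName);   # to the identical pool file
+#     } else {
+#         link($fileName, NewPoolName($digest));  # seed pool for later
+#     }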
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::Attrib;
+use BackupPC::PoolWrite;
+
+use File::Find;
+use File::Path;
+use Digest::MD5;
+
+###########################################################################
+# Initialize
+###########################################################################
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+$bpc->ChildInit();
+
+if ( @ARGV != 1 ) {
+ print("usage: $0 <host>\n");
+ exit(1);
+}
+if ( $ARGV[0] !~ /^([\w\.-]+)$/ ) {
+ print("$0: bad host name '$ARGV[0]'\n");
+ exit(1);
+}
+my $host = $1;
+my $Dir = "$TopDir/pc/$host";
+my($CurrDumpDir, $Compress);
+
+#
+# Re-read config file, so we can include the PC-specific config
+#
+$bpc->ConfigRead($host);
+%Conf = $bpc->Conf();
+
+###########################################################################
+# Process any backups that haven't been linked
+###########################################################################
+my $md5 = Digest::MD5->new;
+my($nFilesNew, $sizeNew, $sizeNewComp);
+my($nFilesExist, $sizeExist, $sizeExistComp);
+while ( 1 ) {
+ my @Backups = $bpc->BackupInfoRead($host);
+ $nFilesNew = $sizeNew = $sizeNewComp = 0;
+ $nFilesExist = $sizeExist = $sizeExistComp = 0;
+ my($num);
+ for ( $num = 0 ; $num < @Backups ; $num++ ) {
+ last if ( $Backups[$num]{nFilesNew} eq ""
+ || -f "$Dir/NewFileList.$Backups[$num]{num}" );
+ }
+ last if ( $num >= @Backups );
+ #
+ # Process list of new files left by BackupPC_dump
+ #
+ $CurrDumpDir = "$Dir/$Backups[$num]{num}";
+ $Compress = $Backups[$num]{compress};
+ if ( open(NEW, "$Dir/NewFileList.$Backups[$num]{num}") ) {
+ while ( <NEW> ) {
+ chomp;
+ next if ( !/(\w+) (\d+) (.*)/ );
+ LinkNewFile($1, $2, "$CurrDumpDir/$3");
+ }
+ close(NEW);
+ }
+ unlink("$Dir/NewFileList.$Backups[$num]{num}")
+ if ( -f "$Dir/NewFileList.$Backups[$num]{num}" );
+
+ #
+ # See if we should fill in this dump. We only need to fill
+ # in incremental dumps. We can only fill in the incremental
+ # dump if there is an existing filled in dump with the same
+ # type of compression (on or off). Eg, we can't fill in
+ # a compressed incremental if the most recent filled in dump
+ # is not compressed.
+ #
+ my $noFill = 1;
+ my $fillFromNum;
+ if ( $Backups[$num]{type} eq "full" ) {
+ $noFill = 0;
+ } elsif ( $Conf{IncrFill} ) {
+ my $i;
+ for ( $i = $num - 1 ; $i >= 0 ; $i-- ) {
+ last if ( !$Backups[$i]{noFill}
+ && ($Backups[$i]{compress} ? 1 : 0)
+ == ($Compress ? 1 : 0) );
+ }
+ my $prevDump = $i >= 0 ? "$Dir/$Backups[$i]{num}" : "";
+ if ( $i >= 0 && -d $prevDump ) {
+ find({wanted => \&FillIncr, no_chdir => 1}, $prevDump);
+ $noFill = 0;
+ $fillFromNum = $Backups[$i]{num};
+ }
+ }
+ #
+ # Update the backup info file in $TopDir/pc/$host/backups
+ #
+ @Backups = $bpc->BackupInfoRead($host);
+ $Backups[$num]{nFilesExist} += $nFilesExist;
+ $Backups[$num]{sizeExist} += $sizeExist;
+ $Backups[$num]{sizeExistComp} += $sizeExistComp;
+ $Backups[$num]{nFilesNew} += $nFilesNew;
+ $Backups[$num]{sizeNew} += $sizeNew;
+ $Backups[$num]{sizeNewComp} += $sizeNewComp;
+ $Backups[$num]{noFill} = $noFill;
+ $Backups[$num]{fillFromNum} = $fillFromNum;
+ $bpc->BackupInfoWrite($host, @Backups);
+}
+
+###########################################################################
+# Subroutines
+###########################################################################
+
+#
+# Fill in an incremental dump by making hardlinks to the previous
+# dump.
+#
+sub FillIncr
+{
+ my($name) = $File::Find::name;
+ my($newName);
+
+ $name = $1 if ( $name =~ /(.*)/ );
+ return if ( $name !~ m{\Q$Dir\E/(\d+)/(.*)} );
+ $newName = "$CurrDumpDir/$2";
+ if ( -d $name && -d $newName ) {
+ #
+ # Merge the file attributes.
+ #
+ my $newAttr = BackupPC::Attrib->new({ compress => $Compress });
+ my $attr = BackupPC::Attrib->new({ compress => $Compress });
+ $newAttr->read($newName) if ( -f $newAttr->fileName($newName) );
+ $attr->read($name) if ( -f $attr->fileName($name) );
+ $newAttr->merge($attr);
+ #
+ # Now write it out, adding a link to the pool if necessary
+ #
+ my $data = $newAttr->writeData;
+ my $origSize = length($data);
+ my $fileName = $newAttr->fileName($newName);
+ my $poolWrite = BackupPC::PoolWrite->new($bpc, $fileName,
+ length($data), $Compress);
+ $poolWrite->write(\$data);
+ my($exists, $digest, $outSize, $errs) = $poolWrite->close;
+ if ( @$errs ) {
+ print("log ", join("", @$errs));
+ }
+ if ( $exists ) {
+ $nFilesExist++;
+ $sizeExist += $origSize;
+ $sizeExistComp += $outSize;
+ } elsif ( $outSize > 0 ) {
+ $nFilesNew++;
+ $sizeNew += $origSize;
+ $sizeNewComp += $outSize;
+ LinkNewFile($digest, $origSize, $fileName);
+ }
+ } elsif ( -f $name && !-f $newName ) {
+ #
+ # Exists in the older filled backup, and not in the new, so link it
+ #
+ link($name, $newName);
+ }
+}
+
+#
+# Add a link in the pool to a new file
+#
+sub LinkNewFile
+{
+ my($d, $size, $fileName) = @_;
+ my $res = $bpc->MakeFileLink($fileName, $d, 1, $Compress);
+ if ( $res == 1 ) {
+ $nFilesExist++;
+ $sizeExist += $size;
+ $sizeExistComp += -s $fileName;
+ } elsif ( $res == 2 ) {
+ $nFilesNew++;
+ $sizeNew += $size;
+ $sizeNewComp += -s $fileName;
+ } elsif ( $res != 0 && $res != -1 ) {
+ print("log BackupPC_link got error $res when calling"
+ . " MakeFileLink($fileName, $d, 1)\n");
+ }
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_nightly: Nightly cleanup & statistics script.
+#
+# DESCRIPTION
+# BackupPC_nightly performs several administrative tasks:
+#
+# - monthly aging of per-PC log files
+#
+# - pruning files from the pool that are no longer used (ie: those
+# with only one hard link).
+#
+# - sending email to users and administrators.
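+#
+# As a minimal sketch of the pruning test (assuming $name holds the
+# path of a plain file in the pool):
+#
+#     my @s = stat($name);
+#     unlink($name) if ( -f _ && $s[3] == 1 );   # $s[3] is nlink
+#
+# The full scan in GetPoolStats below also gathers pool statistics
+# and renumbers files that share the same MD5 checksum.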
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+
+use File::Find;
+use File::Path;
+use Data::Dumper;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+$bpc->ChildInit();
+
+my $err = $bpc->ServerConnect($Conf{ServerHost}, $Conf{ServerPort});
+if ( $err ) {
+ print("Can't connect to server ($err)\n");
+ exit(1);
+}
+my $reply = $bpc->ServerMesg("status hosts");
+$reply = $1 if ( $reply =~ /(.*)/s );
+my(%Status, %Info, %Jobs, @BgQueue, @UserQueue, @CmdQueue);
+eval($reply);
+
+###########################################################################
+# When BackupPC_nightly starts, BackupPC will not run any simultaneous
+# BackupPC_dump commands. So we start with the tasks that contend
+# with BackupPC_dump, eg: aging the per-PC log files.
+###########################################################################
+
+#
+# Do per-PC log file aging
+#
+my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+if ( $mday == 1 ) {
+ foreach my $host ( keys(%Status) ) {
+ my $lastLog = $Conf{MaxOldPerPCLogFiles} - 1;
+ unlink("$TopDir/pc/$host/LOG.$lastLog")
+ if ( -f "$TopDir/pc/$host/LOG.$lastLog" );
+ unlink("$TopDir/pc/$host/LOG.$lastLog.z")
+ if ( -f "$TopDir/pc/$host/LOG.$lastLog.z" );
+ for ( my $i = $lastLog - 1 ; $i >= 0 ; $i-- ) {
+ my $j = $i + 1;
+ if ( -f "$TopDir/pc/$host/LOG.$i" ) {
+ rename("$TopDir/pc/$host/LOG.$i", "$TopDir/pc/$host/LOG.$j");
+ } elsif ( -f "$TopDir/pc/$host/LOG.$i.z" ) {
+ rename("$TopDir/pc/$host/LOG.$i.z",
+ "$TopDir/pc/$host/LOG.$j.z");
+ }
+ }
+ #
+ # Compress the log file LOG -> LOG.0.z (if enabled).
+ # Otherwise, just rename LOG -> LOG.0.
+ #
+ BackupPC::FileZIO->compressCopy("$TopDir/pc/$host/LOG",
+ "$TopDir/pc/$host/LOG.0.z",
+ "$TopDir/pc/$host/LOG.0",
+ $Conf{CompressLevel}, 1);
+ open(LOG, ">$TopDir/pc/$host/LOG") && close(LOG);
+ }
+}
+
+###########################################################################
+# Get statistics on the pool, and remove files that have only one link.
+###########################################################################
+
+my $fileCnt; # total number of files
+my $dirCnt; # total number of directories
+my $blkCnt; # total block size of files
+my $fileCntRm; # total number of removed files
+my $blkCntRm; # total block size of removed files
+my $blkCnt2; # total block size of files with just 2 links
+ # (ie: files that only occur once among all backups)
+my $fileCntRep; # total number of file names containing "_", ie: files
+ # that have repeated md5 checksums
+my $fileRepMax; # worst-case number of files that have repeated checksums
+ # (ie: max(nnn+1) for all names xxxxxxxxxxxxxxxx_nnn)
+my $fileCntRename; # number of renamed files (to keep file numbering
+ # contiguous)
+my %FixList; # list of paths that need to be renamed to avoid
+ # new holes
+for my $pool ( qw(pool cpool) ) {
+ $fileCnt = 0;
+ $dirCnt = 0;
+ $blkCnt = 0;
+ $fileCntRm = 0;
+ $blkCntRm = 0;
+ $blkCnt2 = 0;
+ $fileCntRep = 0;
+ $fileRepMax = 0;
+ $fileCntRename = 0;
+ %FixList = ();
+ find({wanted => \&GetPoolStats, no_chdir => 1}, "$TopDir/$pool");
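+ #
+ # The block counts from stat() are in 512-byte units, so dividing
+ # by 2 converts them to kbytes.
+ #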
+ my $kb = $blkCnt / 2;
+ my $kbRm = $blkCntRm / 2;
+ my $kb2 = $blkCnt2 / 2;
+
+ #
+ # Now make sure that files with repeated checksums are still
+ # sequentially numbered
+ #
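+ # For example (hypothetical pool names): if 1a2b_1 was removed,
+ # leaving 1a2b, 1a2b_0 and 1a2b_2, then 1a2b_2 is renamed to
+ # 1a2b_1 so the _nnn suffixes stay contiguous.
+ #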
+ foreach my $name ( sort(keys(%FixList)) ) {
+ my $rmCnt = $FixList{$name} + 1;
+ my $new = -1;
+ for ( my $old = -1 ; ; $old++ ) {
+ my $oldName = $name;
+ $oldName .= "_$old" if ( $old >= 0 );
+ if ( !-f $oldName ) {
+ #
+ # We know we are done when we have missed at least
+ # the number of files that were removed from this
+ # base name, plus a couple just to be sure
+ #
+ last if ( $rmCnt-- <= 0 );
+ next;
+ }
+ my $newName = $name;
+ $newName .= "_$new" if ( $new >= 0 );
+ $new++;
+ next if ( $oldName eq $newName );
+ rename($oldName, $newName);
+ $fileCntRename++;
+ }
+ }
+ print("BackupPC_stats = $pool,$fileCnt,$dirCnt,$kb,$kb2,$kbRm,$fileCntRm,"
+ . "$fileCntRep,$fileRepMax,$fileCntRename\n");
+}
+
+###########################################################################
+# Tell BackupPC that it is now ok to start running BackupPC_dump
+# commands. We are guaranteed that no BackupPC_link commands will
+# run since only a single CmdQueue command runs at a time, and
+# that means we are safe.
+###########################################################################
+printf("BackupPC_nightly lock_off\n");
+
+###########################################################################
+# Send email
+###########################################################################
+system("$BinDir/BackupPC_sendEmail");
+
+sub GetPoolStats
+{
+ my($name) = $File::Find::name;
+ my($baseName) = "";
+ my(@s);
+
+ return if ( !-d && !-f );
+ $dirCnt += -d;
+ $name = $1 if ( $name =~ /(.*)/ );
+ @s = stat($name);
+ if ( $name =~ /(.*)_(\d+)$/ ) {
+ $baseName = $1;
+ if ( $s[3] != 1 ) {
+ $fileRepMax = $2 + 1 if ( $fileRepMax <= $2 );
+ $fileCntRep++;
+ }
+ }
+ if ( -f && $s[3] == 1 ) {
+ $blkCntRm += $s[12];
+ $fileCntRm++;
+ unlink($name);
+ #
+ # We must keep repeated files numbered sequentially (ie: files
+ # that have the same checksum are appended with _0, _1 etc).
+ # There are two cases: we remove the base file xxxx, but xxxx_0
+ # exists, or we remove any file of the form xxxx_nnn. We remember
+ # the base name and fix it up later (not in the middle of find).
+ #
+ $baseName = $name if ( $baseName eq "" );
+ $FixList{$baseName}++;
+ } else {
+ $fileCnt += -f;
+ $blkCnt += $s[12];
+ $blkCnt2 += $s[12] if ( -f && $s[3] == 2 );
+ }
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_restore: Restore files to a client.
+#
+# DESCRIPTION
+#
+# Usage: BackupPC_restore <hostIP> <host> <reqFileName>
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+use BackupPC::Xfer::Smb;
+use BackupPC::Xfer::Tar;
+
+use File::Path;
+use Getopt::Std;
+
+use vars qw( %RestoreReq );
+
+###########################################################################
+# Initialize
+###########################################################################
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+my($hostIP, $host, $reqFileName);
+
+$bpc->ChildInit();
+
+if ( @ARGV != 3 ) {
+ print("usage: $0 <hostIP> <host> <reqFileName>\n");
+ exit(1);
+}
+$hostIP = $1 if ( $ARGV[0] =~ /(.+)/ );
+$host = $1 if ( $ARGV[1] =~ /(.+)/ );
+if ( $ARGV[2] !~ /^([\w.]+)$/ ) {
+ print("$0: bad reqFileName (arg #3): $ARGV[2]\n");
+ exit(1);
+}
+$reqFileName = $1;
+
+my $Hosts = $bpc->HostInfoRead();
+
+#
+# Re-read config file, so we can include the PC-specific config
+#
+$bpc->ConfigRead($host);
+%Conf = $bpc->Conf();
+
+my $Dir = "$TopDir/pc/$host";
+my $xferPid = -1;
+my $tarPid = -1;
+
+#
+# Catch various signals
+#
+$SIG{INT} = \&catch_signal;
+$SIG{ALRM} = \&catch_signal;
+$SIG{TERM} = \&catch_signal;
+
+#
+# Read the request file
+#
+if ( !(my $ret = do "$Dir/$reqFileName") ) {
+ die "couldn't parse $Dir/$reqFileName: $@" if $@;
+ die "couldn't do $Dir/$reqFileName: $!" unless defined $ret;
+ die "couldn't run $Dir/$reqFileName";
+}
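+
+#
+# The request file is itself a perl file that defines %RestoreReq.
+# A hypothetical example (the field names are the ones used below;
+# the values are made up):
+#
+#     %RestoreReq = (
+#         hostSrc     => "pc1",
+#         num         => 5,
+#         shareSrc    => "C\$",
+#         shareDest   => "C\$",
+#         pathHdrSrc  => "/somedir",
+#         pathHdrDest => "/somedir.restored",
+#         fileList    => [ "file1", "dir1" ],
+#     );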
+
+#
+# Make sure we eventually timeout if there is no activity from
+# the data transport program.
+#
+alarm($Conf{SmbClientTimeout});
+
+mkpath($Dir, 0, 0777) if ( !-d $Dir );
+if ( !-f "$Dir/LOCK" ) {
+ open(LOCK, ">$Dir/LOCK") && close(LOCK);
+}
+open(LOG, ">>$Dir/LOG");
+select(LOG); $| = 1; select(STDOUT);
+
+#
+# Check if $host is alive
+#
+my $delay = $bpc->CheckHostAlive($hostIP);
+if ( $delay < 0 ) {
+ print(LOG $bpc->timeStamp, "no ping response\n");
+ print("no ping response\n");
+ exit(1);
+} elsif ( $delay > $Conf{PingMaxMsec} ) {
+ printf(LOG "%sping too slow: %.4gmsec\n", $bpc->timeStamp, $delay);
+ printf("ping too slow: %.4gmsec (threshold is %gmsec)\n",
+ $delay, $Conf{PingMaxMsec});
+ exit(1);
+}
+
+#
+# Make sure it is really the machine we expect
+#
+if ( (my $errMsg = CorrectHostCheck($hostIP, $host)) ) {
+ print(LOG $bpc->timeStamp, "restore failed: $errMsg\n");
+ print("restore failed: $errMsg\n");
+ exit(1);
+}
+
+#
+# Setup file extension for compression and open RestoreLOG output file
+#
+$Conf{CompressLevel} = 0 if ( !BackupPC::FileZIO->compOk );
+my $fileExt = $Conf{CompressLevel} > 0 ? ".z" : "";
+my $RestoreLOG = BackupPC::FileZIO->open("$Dir/RestoreLOG$fileExt", 1,
+ $Conf{CompressLevel});
+my $startTime = time();
+
+my $tarCreateFileCnt = 0;
+my $tarCreateByteCnt = 0;
+my $tarCreateErrCnt = 1; # assume not ok until we learn otherwise
+my $tarCreateErr;
+my($logMsg, %stat, $xfer);
+
+#
+# Now do the restore
+#
+local(*RH, *WH);
+
+$stat{xferOK} = $stat{hostAbort} = undef;
+$stat{hostError} = $stat{lastOutputLine} = undef;
+
+#
+# Create a pipe to connect BackupPC_tarCreate to the transport program
+# (smbclient, tar, etc).
+# WH is the write end of the pipe, provided to BackupPC_tarCreate,
+# and RH is the read end, provided to the transport program.
+#
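+# Roughly:
+#
+#     BackupPC_tarCreate ==> WH --pipe--> RH ==> smbclient/tar
+#                            (tar stream flowing toward the client)
+#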
+pipe(RH, WH);
+
+#
+# Run the transport program, which reads from RH and extracts the data.
+#
+my $xferArgs = {
+ host => $host,
+ hostIP => $hostIP,
+ type => "restore",
+ shareName => $RestoreReq{shareDest},
+ pipeRH => *RH,
+ pipeWH => *WH,
+ XferLOG => $RestoreLOG,
+};
+if ( $Conf{XferMethod} eq "tar" ) {
+ #
+ # Use tar (eg: tar/ssh) as the transport program.
+ #
+ $xfer = BackupPC::Xfer::Tar->new($bpc, $xferArgs);
+} else {
+ #
+ # Default is to use smbclient (smb) as the transport program.
+ #
+ $xfer = BackupPC::Xfer::Smb->new($bpc, $xferArgs);
+}
+if ( !defined($logMsg = $xfer->start()) ) {
+ print(LOG $bpc->timeStamp, $xfer->errStr, "\n");
+ print($xfer->errStr, "\n");
+ exit(1);
+}
+#
+# The parent must close the read handle since the transport program
+# is using it.
+#
+close(RH);
+
+#
+# fork a child for BackupPC_tarCreate. TAR is a file handle
+# on which we (the parent) read the stderr from BackupPC_tarCreate.
+#
+my @tarPathOpts;
+if ( defined($RestoreReq{pathHdrDest})
+ && $RestoreReq{pathHdrDest} ne $RestoreReq{pathHdrSrc} ) {
+ @tarPathOpts = ("-r", $RestoreReq{pathHdrSrc},
+ "-p", $RestoreReq{pathHdrDest}
+ );
+}
+my @tarArgs = (
+ "-h", $RestoreReq{hostSrc},
+ "-n", $RestoreReq{num},
+ "-s", $RestoreReq{shareSrc},
+ "-t",
+ @tarPathOpts,
+ @{$RestoreReq{fileList}},
+);
+my $logMsg = "Running: $BinDir/BackupPC_tarCreate "
+ . join(" ", @tarArgs) . "\n";
+$RestoreLOG->write(\$logMsg);
+if ( !defined($tarPid = open(TAR, "-|")) ) {
+ print(LOG $bpc->timeStamp, "can't fork to run tar\n");
+ print("can't fork to run tar\n");
+ close(WH);
+ # FIX: need to cleanup xfer
+ exit(0);
+}
+if ( !$tarPid ) {
+ #
+ # This is the tarCreate child. Clone STDERR to STDOUT,
+ # STDOUT to WH, and then exec BackupPC_tarCreate.
+ #
+ setpgrp 0,0;
+ close(STDERR);
+ open(STDERR, ">&STDOUT");
+ close(STDOUT);
+ open(STDOUT, ">&WH");
+ exec("$BinDir/BackupPC_tarCreate", @tarArgs);
+ print(LOG $bpc->timeStamp, "can't exec $BinDir/BackupPC_tarCreate\n");
+ # FIX: need to cleanup xfer
+ exit(0);
+}
+#
+# The parent must close the write handle since BackupPC_tarCreate
+# is using it.
+#
+close(WH);
+
+$xferPid = $xfer->xferPid;
+print(LOG $bpc->timeStamp, $logMsg, " (tarPid=$tarPid, xferPid=$xferPid)\n");
+print("started restore, tarPid=$tarPid, xferPid=$xferPid\n");
+
+#
+# Parse the output of the transfer program and BackupPC_tarCreate
+# while they run. Since we are reading from two or more children
+# we use a select.
+#
+my($FDread, $tarOut, $mesg);
+vec($FDread, fileno(TAR), 1) = 1;
+$xfer->setSelectMask(\$FDread);
+
+SCAN: while ( 1 ) {
+ my $ein = $FDread;
+ last if ( $FDread =~ /^\0*$/ );
+ select(my $rout = $FDread, undef, $ein, undef);
+ if ( vec($rout, fileno(TAR), 1) ) {
+ if ( sysread(TAR, $mesg, 8192) <= 0 ) {
+ vec($FDread, fileno(TAR), 1) = 0;
+ if ( !close(TAR) ) {
+ $tarCreateErrCnt = 1;
+ $tarCreateErr = "BackupPC_tarCreate failed";
+ }
+ } else {
+ $tarOut .= $mesg;
+ }
+ }
+ while ( $tarOut =~ /(.*?)[\n\r]+(.*)/s ) {
+ $_ = $1;
+ $tarOut = $2;
+ $RestoreLOG->write(\"tarCreate: $_\n");
+ if ( /^Done: (\d+) files, (\d+) bytes, (\d+) dirs, (\d+) specials, (\d+) errors/ ) {
+ $tarCreateFileCnt = $1;
+ $tarCreateByteCnt = $2;
+ $tarCreateErrCnt = $5;
+ }
+ }
+ last if ( !$xfer->readOutput(\$FDread, $rout) );
+ while ( my $str = $xfer->logMsgGet ) {
+ print(LOG $bpc->timeStamp, "xfer: $str\n");
+ }
+ if ( $xfer->getStats->{fileCnt} == 1 ) {
+ #
+ # Make sure it is still the machine we expect. We do this while
+ # the transfer is running to avoid a potential race condition if
+ # the ip address was reassigned by dhcp just before we started
+ # the transfer.
+ #
+ if ( my $errMsg = CorrectHostCheck($hostIP, $host) ) {
+ $stat{hostError} = $errMsg;
+ last SCAN;
+ }
+ }
+}
+
+#
+# Merge the xfer status (need to accumulate counts)
+#
+my $newStat = $xfer->getStats;
+foreach my $k ( (keys(%stat), keys(%$newStat)) ) {
+ next if ( !defined($newStat->{$k}) );
+ if ( $k =~ /Cnt$/ ) {
+ $stat{$k} += $newStat->{$k};
+ delete($newStat->{$k});
+ next;
+ }
+ if ( !defined($stat{$k}) ) {
+ $stat{$k} = $newStat->{$k};
+ delete($newStat->{$k});
+ next;
+ }
+}
+$RestoreLOG->close();
+$stat{xferOK} = 0 if ( $stat{hostError} || $stat{hostAbort} || $tarCreateErr );
+
+if ( !$stat{xferOK} ) {
+ #
+ # kill off the transfer program, first nicely then forcefully
+ #
+ kill(2, $xferPid);
+ sleep(1);
+ kill(9, $xferPid);
+ #
+ # kill off the tar process, first nicely then forcefully
+ #
+ kill(2, $tarPid);
+ sleep(1);
+ kill(9, $tarPid);
+}
+
+my $lastNum = -1;
+my @Restores;
+
+#
+# Do one last check to make sure it is still the machine we expect.
+#
+if ( $stat{xferOK} && (my $errMsg = CorrectHostCheck($hostIP, $host)) ) {
+ $stat{hostError} = $errMsg;
+ $stat{xferOK} = 0;
+}
+@Restores = $bpc->RestoreInfoRead($host);
+for ( my $i = 0 ; $i < @Restores ; $i++ ) {
+ $lastNum = $Restores[$i]{num} if ( $lastNum < $Restores[$i]{num} );
+}
+$lastNum++;
+rename("$Dir/RestoreLOG$fileExt", "$Dir/RestoreLOG.$lastNum$fileExt");
+rename("$Dir/$reqFileName", "$Dir/RestoreInfo.$lastNum");
+my $endTime = time();
+
+#
+# If the restore failed, clean up
+#
+if ( !$stat{xferOK} ) {
+ #
+ # wait a short while and see if the system is still alive
+ #
+ $stat{hostError} ||= $tarCreateErr if ( $tarCreateErr ne "" );
+ $stat{hostError} = $stat{lastOutputLine} if ( $stat{hostError} eq "" );
+ if ( $stat{hostError} ) {
+ print(LOG $bpc->timeStamp,
+ "Got fatal error during xfer ($stat{hostError})\n");
+ }
+ sleep(2);
+ if ( $bpc->CheckHostAlive($hostIP) < 0 ) {
+ $stat{hostAbort} = 1;
+ }
+ if ( $stat{hostAbort} ) {
+ $stat{hostError} = "lost network connection during restore";
+ }
+}
+
+#
+# Add the new restore information to the restore file
+#
+@Restores = $bpc->RestoreInfoRead($host);
+my $i = @Restores;
+$Restores[$i]{num} = $lastNum;
+$Restores[$i]{startTime} = $startTime;
+$Restores[$i]{endTime} = $endTime;
+$Restores[$i]{result} = $stat{xferOK} ? "ok" : "failed";
+$Restores[$i]{errorMsg} = $stat{hostError};
+$Restores[$i]{nFiles} = $tarCreateFileCnt;
+$Restores[$i]{size} = $tarCreateByteCnt;
+$Restores[$i]{tarCreateErrs} = $tarCreateErrCnt;
+$Restores[$i]{xferErrs} = $stat{xferErrCnt} || 0;
+
+while ( @Restores > $Conf{RestoreInfoKeepCnt} ) {
+ my $num = $Restores[0]{num};
+ unlink("$Dir/RestoreLOG.$num.z");
+ unlink("$Dir/RestoreLOG.$num");
+ unlink("$Dir/RestoreInfo.$num");
+ shift(@Restores);
+}
+$bpc->RestoreInfoWrite($host, @Restores);
+
+if ( !$stat{xferOK} ) {
+ print(LOG $bpc->timeStamp, "Restore aborted ($stat{hostError})\n");
+ print("restore failed: $stat{hostError}\n");
+} else {
+ print("restore complete\n");
+}
+
+###########################################################################
+# Subroutines
+###########################################################################
+
+sub CorrectHostCheck
+{
+ my($hostIP, $host) = @_;
+ return if ( $hostIP eq $host && !$Conf{FixedIPNetBiosNameCheck} );
+ my($netBiosHost, $netBiosUser) = $bpc->NetBiosInfoGet($hostIP);
+ return "host $host has mismatching netbios name $netBiosHost"
+ if ( $netBiosHost ne $host );
+ return;
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_sendEmail: send status emails to users and admins
+#
+# DESCRIPTION
+#
+# BackupPC_sendEmail: send status emails to users and admins.
+# BackupPC_sendEmail is run by BackupPC_nightly, so it runs
+# once every night.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+
+use Data::Dumper;
+use Getopt::Std;
+use DirHandle ();
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+$bpc->ChildInit();
+
+use vars qw(%UserEmailInfo);
+do "$TopDir/log/UserEmailInfo.pl";
+
+my %opts;
+getopts("t", \%opts);
+if ( @ARGV != 0 ) {
+ print("usage: $0 [-t]\n");
+ exit(1);
+}
+
+my $err = $bpc->ServerConnect($Conf{ServerHost}, $Conf{ServerPort});
+if ( $err ) {
+ print("Can't connect to server ($err)\n");
+ exit(1);
+}
+my $reply = $bpc->ServerMesg("status hosts");
+$reply = $1 if ( $reply =~ /(.*)/s );
+my(%Status, %Info, %Jobs, @BgQueue, @UserQueue, @CmdQueue);
+eval($reply);
+
+###########################################################################
+# Generate sysadmin warning messages
+###########################################################################
+my $mesg = "";
+
+my @badHosts = ();
+foreach my $host ( sort(keys(%Status)) ) {
+ next if ( $Status{$host}{reason} ne "backup failed"
+ || $Status{$host}{error} =~ /^lost network connection to host/ );
+ push(@badHosts, "$host ($Status{$host}{error})");
+}
+if ( @badHosts ) {
+ my $badHosts = join("\n - ", sort(@badHosts));
+ $mesg .= <<EOF;
+The following hosts had an error that is probably caused by a
+misconfiguration. Please fix these hosts:
+ - $badHosts
+
+EOF
+}
+
+#
+# Report if we skipped backups because the disk was too full
+#
+if ( $Info{DUDailySkipHostCntPrev} > 0 ) {
+ my $n = $Info{DUDailySkipHostCntPrev};
+ my $m = $Conf{DfMaxUsagePct};
+ $mesg .= <<EOF;
+Yesterday $n hosts were skipped because the file system containing
+$TopDir was too full. The threshold in the
+configuration file is $m%, while yesterday the file system was
+up to $Info{DUDailyMaxPrev}% full. Please find more space on the file system,
+or reduce the number of full or incremental backups that we keep.
+
+EOF
+}
+
+#
+# Check for bogus directories (probably PCs that are no longer
+# on the backup list)
+#
+my $d = DirHandle->new("$TopDir/pc") or die("Can't read $TopDir/pc: $!");
+my @oldDirs = ();
+my @files = $d->read;
+$d->close;
+foreach my $host ( @files ) {
+ next if ( $host eq "." || $host eq ".." || defined($Status{$host}) );
+ push(@oldDirs, "$TopDir/pc/$host");
+}
+if ( @oldDirs ) {
+ my $oldDirs = join("\n - ", sort(@oldDirs));
+ $mesg .= <<EOF;
+The following directories are bogus and are not being used by
+BackupPC. This typically happens when PCs are removed from the
+backup list. If you don't need any old backups from these PCs you
+should remove these directories. If there are machines on this
+list that should be backed up then there is a problem with the
+hosts file:
+ - $oldDirs
+
+EOF
+}
+
+if ( $mesg ne "" && $Conf{EMailAdminUserName} ne "" ) {
+ $mesg = <<EOF;
+To: $Conf{EMailAdminUserName}
+Subject: BackupPC administrative attention needed
+
+${mesg}Regards,
+PC Backup Genie
+EOF
+ if ( $opts{t} ) {
+ print("#" x 75, "\n");
+ print $mesg;
+ } else {
+ SendMail($mesg);
+ }
+}
+
+###########################################################################
+# Generate per-host warning messages sent to each user
+###########################################################################
+my $Hosts = $bpc->HostInfoRead();
+
+foreach my $host ( sort(keys(%Status)) ) {
+ next if ( $Hosts->{$host}{user} eq "" );
+ #
+ # read any per-PC config settings (allowing per-PC email settings)
+ #
+ $bpc->ConfigRead($host);
+ %Conf = $bpc->Conf();
+ my $user = $Hosts->{$host}{user};
+ next if ( time - $UserEmailInfo{$user}{lastTime}
+ < $Conf{EMailNotifyMinDays} * 24*3600 );
+ my @Backups = $bpc->BackupInfoRead($host);
+ my $numBackups = @Backups;
+ if ( $numBackups == 0 ) {
+ my $subj = "BackupPC: no backups of $host have succeeded";
+ sendUserEmail($user, $host, $Conf{EMailNoBackupEverMesg}, $subj, {
+ userName => user2name($user)
+ }) if ( !defined($Jobs{$host}) );
+ next;
+ }
+ my $last = my $lastFull = my $lastIncr = 0;
+ my $lastGoodOutlook = 0;
+ my $lastNum = -1;
+ my $numBadOutlook = 0;
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ my $fh;
+ $lastNum = $Backups[$i]{num} if ( $lastNum < $Backups[$i]{num} );
+ if ( $Backups[$i]{type} eq "full" ) {
+ $lastFull = $Backups[$i]{startTime}
+ if ( $lastFull < $Backups[$i]{startTime} );
+ } else {
+ $lastIncr = $Backups[$i]{startTime}
+ if ( $lastIncr < $Backups[$i]{startTime} );
+ }
+ $last = $Backups[$i]{startTime}
+ if ( $last < $Backups[$i]{startTime} );
+ my $badOutlook = 0;
+ my $file = "$TopDir/pc/$host/SmbLOG.$Backups[$i]{num}";
+ my $comp = 0;
+ if ( !-f $file ) {
+ $file = "$TopDir/pc/$host/XferLOG.$Backups[$i]{num}";
+ if ( !-f $file ) {
+ $comp = 1;
+ $file = "$TopDir/pc/$host/SmbLOG.$Backups[$i]{num}.z";
+ $file = "$TopDir/pc/$host/XferLOG.$Backups[$i]{num}.z"
+ if ( !-f $file );
+ }
+ }
+ next if ( !defined($fh = BackupPC::FileZIO->open($file, 0, $comp)) );
+ while ( 1 ) {
+ my $s = $fh->readLine();
+ last if ( $s eq "" );
+ if ( $s =~ /^Error reading file.*\.pst : ERRDOS - ERRlock/
+ || $s =~ /^Error reading file.*\.pst\. Got 0 bytes/ ) {
+ $badOutlook = 1;
+ last;
+ }
+ }
+ $fh->close();
+ $numBadOutlook += $badOutlook;
+ if ( !$badOutlook ) {
+ $lastGoodOutlook = $Backups[$i]{startTime}
+ if ( $lastGoodOutlook < $Backups[$i]{startTime} );
+ }
+ }
+ if ( time - $last > $Conf{EMailNotifyOldBackupDays} * 24*3600 ) {
+ my $subj = "BackupPC: no recent backups on $host";
+ my $firstTime = sprintf("%.1f",
+ (time - $Backups[0]{startTime}) / (24*3600));
+ my $days = sprintf("%.1f", (time - $last) / (24 * 3600));
+ sendUserEmail($user, $host, $Conf{EMailNoBackupRecentMesg}, $subj, {
+ firstTime => $firstTime,
+ days => $days,
+ userName => user2name($user),
+ numBackups => $numBackups,
+ }) if ( !defined($Jobs{$host}) );
+ next;
+ }
+ if ( $numBadOutlook > 0
+ && time - $lastGoodOutlook > $Conf{EMailNotifyOldOutlookDays}
+ * 24 * 3600 ) {
+ my($days, $howLong);
+ if ( $lastGoodOutlook == 0 ) {
+ $howLong = "not been backed up successfully";
+ } else {
+ $days = sprintf("%.1f", (time - $lastGoodOutlook) / (24*3600));
+ $howLong = "not been backed up for $days days";
+ }
+ my $subj = "BackupPC: Outlook files on $host need to be backed up";
+ my $firstTime = sprintf("%.1f",
+ (time - $Backups[0]{startTime}) / (24*3600));
+ my $lastTime = sprintf("%.1f",
+ (time - $Backups[$#Backups]{startTime}) / (24*3600));
+ sendUserEmail($user, $host, $Conf{EMailOutlookBackupMesg}, $subj, {
+ firstTime => $firstTime,
+ lastTime => $lastTime,
+ numBackups => $numBackups,
+ userName => user2name($user),
+ howLong => $howLong,
+ }) if ( !defined($Jobs{$host}) );
+ }
+}
+if ( !$opts{t} ) {
+ $Data::Dumper::Indent = 1;
+ my $dumpStr = Data::Dumper->Dump(
+ [\%UserEmailInfo],
+ [qw(*UserEmailInfo)]);
+ if ( open(HOST, ">$TopDir/log/UserEmailInfo.pl") ) {
+ print(HOST $dumpStr);
+ close(HOST);
+ }
+}
+
+sub user2name
+{
+ my($user) = @_;
+ my($name) = (getpwnam($user))[6];
+ $name =~ s/\s.*//;
+ $name = $user if ( $name eq "" );
+ return $name;
+}
+
+sub sendUserEmail
+{
+ my($user, $host, $mesg, $subj, $vars) = @_;
+ $vars->{user} = $user;
+ $vars->{host} = $host;
+ $vars->{subj} = $subj;
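+ #
+ # Replace each $xxx in the message template with the matching
+ # value from $vars; variables with no matching value are left
+ # as literal text.
+ #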
+ $mesg =~ s/\$(\w+)/defined($vars->{$1}) ? $vars->{$1} : "\$$1"/eg;
+ if ( $opts{t} ) {
+ print("#" x 75, "\n");
+ print $mesg;
+ } else {
+ SendMail($mesg);
+ }
+ $UserEmailInfo{$user}{lastTime} = time;
+ $UserEmailInfo{$user}{lastSubj} = $subj;
+ $UserEmailInfo{$user}{lastHost} = $host;
+}
+
+sub SendMail
+{
+ my($mesg) = @_;
+ my($from) = $Conf{EMailFromUserName};
+ local(*MAIL);
+
+ $from = "-f $from" if ( $from ne "" );
+ if ( !open(MAIL, "|$Conf{SendmailPath} -t $from") ) {
+ printf("Can't run sendmail ($Conf{SendmailPath}): $!\n");
+ return;
+ }
+ print MAIL $mesg;
+ close(MAIL);
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_serverMesg: Send one or more commands to the BackupPC server.
+#
+# DESCRIPTION
+# As of v1.5.0 the BackupPC server communicates via a unix or internet
+# domain socket. Every message is protected with an MD5 digest, based
+# on a shared secret, a sequence number, and a per-connection unique
+# key. This minimizes the risk of an attacker issuing fake commands
+# to the BackupPC server.
+#
+# Previously, telnet could be used to talk to the BackupPC server.
+# As of v1.5.0 that is no longer possible.
+#
+# This script replaces telnet as a mechanism for sending BackupPC
+# messages. Usage:
+#
+# BackupPC_serverMesg mesg
+#
+# Example:
+# BackupPC_serverMesg status info
+#
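+# Every message carries a digest computed roughly like this (the
+# exact fields and their order are an implementation detail of
+# BackupPC::Lib, assumed here only for illustration):
+#
+#     use Digest::MD5;
+#     my $md5 = Digest::MD5->new;
+#     $md5->add($secret, $seqNum, $uniqueKey, $mesg);
+#     my $digest = $md5->b64digest;
+#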
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+
+use File::Find;
+use File::Path;
+use Data::Dumper;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+$bpc->ChildInit();
+
+if ( !@ARGV ) {
+ print("usage: $0 mesg\n");
+ exit(1);
+}
+
+my $err = $bpc->ServerConnect($Conf{ServerHost}, $Conf{ServerPort});
+if ( $err ) {
+ print("Can't connect to server ($err)\n");
+ exit(1);
+}
+my $reply = $bpc->ServerMesg(join(" ", @ARGV));
+print("Got reply: $reply");
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_tarCreate: create a tar archive of an existing dump
+# for restore on a client.
+#
+# DESCRIPTION
+#
+# Usage: BackupPC_tarCreate [-t] [-h host] [-n dumpNum] [-s shareName]
+# [-r pathRemove] [-p pathAdd] files/directories...
+#
+# Flags:
+# Required options:
+#
+# -h host host from which the tar archive is created
+# -n dumpNum dump number from which the tar archive is created
+# -s shareName share name from which the tar archive is created
+#
+# Other options:
+# -t print summary totals
+# -r pathRemove path prefix that will be replaced with pathAdd
+# -p pathAdd new path prefix
+#
+# The -h, -n and -s options specify which dump is used to generate
+# the tar archive. The -r and -p options can be used to relocate
+# the paths in the tar archive so extracted files can be placed
+# in a location different from their original location.
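+#
+# For example (hypothetical host, dump number and share):
+#
+#     BackupPC_tarCreate -t -h pc1 -n 5 -s "C\$" \
+#             -r /somedir -p /restored /somedir > dump.tar
+#
+# writes a tar archive of /somedir from dump #5 of host pc1, with
+# paths inside the archive rewritten from ./somedir to ./restored.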
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use File::Path;
+use Getopt::Std;
+use BackupPC::Lib;
+use BackupPC::Attrib qw(:all);
+use BackupPC::FileZIO;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+my %opts;
+getopts("th:n:p:r:s:", \%opts);
+
+if ( @ARGV < 1 ) {
+ print(STDERR "usage: $0 [-t] [-h host] [-n dumpNum] [-s shareName]"
+ . " [-r pathRemove] [-p pathAdd]"
+ . " files/directories...\n");
+ exit(1);
+}
+
+if ( $opts{h} !~ /^([\w\.-]+)$/ ) {
+ print(STDERR "$0: bad host name '$opts{h}'\n");
+ exit(1);
+}
+my $Host = $opts{h};
+
+if ( $opts{n} !~ /^(\d+)$/ ) {
+ print(STDERR "$0: bad dump number '$opts{n}'\n");
+ exit(1);
+}
+my $Num = $opts{n};
+
+my @Backups = $bpc->BackupInfoRead($Host);
+my($Compress, $Mangle, $CompressF, $MangleF, $NumF, $i);
+my $FileCnt = 0;
+my $ByteCnt = 0;
+my $DirCnt = 0;
+my $SpecialCnt = 0;
+my $ErrorCnt = 0;
+
+for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ if ( !$Backups[$i]{noFill} ) {
+ #
+ # Remember the most recent filled backup
+ #
+ $NumF = $Backups[$i]{num};
+ $MangleF = $Backups[$i]{mangle};
+ $CompressF = $Backups[$i]{compress};
+ }
+ next if ( $Backups[$i]{num} != $Num );
+ $Compress = $Backups[$i]{compress};
+ $Mangle = $Backups[$i]{mangle};
+ if ( !$Backups[$i]{noFill} ) {
+ # no need to back-fill a filled backup
+ $NumF = $MangleF = $CompressF = undef;
+ }
+ last;
+}
+if ( $i >= @Backups ) {
+ print(STDERR "$0: bad backup number $Num for host $Host\n");
+ exit(1);
+}
+
+my($PathRemove, $PathAdd);
+$PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
+$PathAdd = $1 if ( $opts{p} =~ /(.+)/ );
+if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ ) {
+ print(STDERR "$0: bad share name '$opts{s}'\n");
+ exit(1);
+}
+my $ShareNameOrig = $opts{s};
+my $ShareName = $Mangle ? $bpc->fileNameEltMangle($ShareNameOrig)
+ : $ShareNameOrig;
+my $ShareNameF = $MangleF ? $bpc->fileNameEltMangle($ShareNameOrig)
+ : $ShareNameOrig;
+
+#
+# This constant and the line of code below that uses it are borrowed
+# from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander.
+# See www.cpan.org.
+#
+# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
+# Copyright 1998 Stephen Zander. All rights reserved.
+#
+my $tar_pack_header
+ = 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
+my $tar_header_length = 512;
+
+my $BufSize = 1048576; # 1MB or 2^20
+my $WriteBuf = "";
+my $WriteBufSz = 20 * $tar_header_length;
+
+my(%UidCache, %GidCache);
+my(%HardLinkExtraFiles, @HardLinks);
+
+#
+# Write out all the requested files/directories
+#
+my $fh = *STDOUT;
+foreach my $dir ( @ARGV ) {
+ archiveWrite($fh, $dir);
+}
+
+#
+# Write out any hardlinks (if any)
+#
+foreach my $hdr ( @HardLinks ) {
+ $hdr->{size} = 0;
+ if ( defined($PathRemove)
+ && substr($hdr->{linkname}, 0, length($PathRemove)+1)
+ eq ".$PathRemove" ) {
+ substr($hdr->{linkname}, 0, length($PathRemove)+1) = ".$PathAdd";
+ }
+ TarWriteFileInfo($fh, $hdr);
+}
+
+#
+# Finish with two null 512 byte headers, and then round out a full
+# block.
+#
+my $data = "\0" x ($tar_header_length * 2);
+TarWrite($fh, \$data);
+TarWrite($fh, undef);
+
+#
+# print out totals if requested
+#
+if ( $opts{t} ) {
+ print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
+ " $SpecialCnt specials, $ErrorCnt errors\n";
+}
+exit(0);
+
+###########################################################################
+# Subroutines
+###########################################################################
+
+sub archiveWrite
+{
+ my($fh, $dir, $tarPathOverride) = @_;
+ if ( $dir =~ m{(^|/)\.\.(/|$)} || $dir !~ /^(.*)$/ ) {
+ print(STDERR "$0: bad directory '$dir'\n");
+ $ErrorCnt++;
+ return;
+ }
+ (my $DirOrig = $1) =~ s{/+$}{};
+ $DirOrig =~ s{^\.?/+}{};
+ my($Dir, $DirF, $FullPath, $FullPathF);
+ if ( $DirOrig eq "" ) {
+ $Dir = $DirF = "";
+ $FullPath = "$TopDir/pc/$Host/$Num/$ShareName";
+ $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF"
+ if ( defined($NumF) );
+ } else {
+ $Dir = $Mangle ? $bpc->fileNameMangle($DirOrig) : $DirOrig;
+ $DirF = $MangleF ? $bpc->fileNameMangle($DirOrig) : $DirOrig;
+ $FullPath = "$TopDir/pc/$Host/$Num/$ShareName/$Dir";
+ $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF/$DirF"
+ if ( defined($NumF) );
+ }
+ if ( -f $FullPath ) {
+ TarWriteFile($fh, $FullPath, $Mangle, $Compress, $tarPathOverride);
+ } elsif ( -d $FullPath || (defined($NumF) && -d $FullPathF) ) {
+ MergeFind($fh, $FullPath, $FullPathF);
+ } elsif ( defined($NumF) && -f $FullPathF ) {
+ TarWriteFile($fh, $FullPathF, $MangleF, $CompressF, $tarPathOverride);
+ } else {
+ print(STDERR "$0: $Host, backup $Num, doesn't have a directory or file"
+ . " $ShareNameOrig/$DirOrig\n");
+ $ErrorCnt++;
+ }
+}
+
+sub UidLookup
+{
+ my($uid) = @_;
+
+ $UidCache{$uid} = (getpwuid($uid))[0] if ( !exists($UidCache{$uid}) );
+ return $UidCache{$uid};
+}
+
+sub GidLookup
+{
+ my($gid) = @_;
+
+ $GidCache{$gid} = (getgrgid($gid))[0] if ( !exists($GidCache{$gid}) );
+ return $GidCache{$gid};
+}
+
+sub TarWrite
+{
+ my($fh, $dataRef) = @_;
+
+ if ( !defined($dataRef) ) {
+ #
+ # do flush by padding to a full $WriteBufSz
+ #
+ my $data = "\0" x ($WriteBufSz - length($WriteBuf));
+ $dataRef = \$data;
+ }
+ if ( length($WriteBuf) + length($$dataRef) < $WriteBufSz ) {
+ #
+ # just buffer and return
+ #
+ $WriteBuf .= $$dataRef;
+ return;
+ }
+ my $done = $WriteBufSz - length($WriteBuf);
+ if ( syswrite($fh, $WriteBuf . substr($$dataRef, 0, $done))
+ != $WriteBufSz ) {
+ print(STDERR "Unable to write to output file\n");
+ exit(1);
+ }
+ while ( $done + $WriteBufSz <= length($$dataRef) ) {
+ if ( syswrite($fh, substr($$dataRef, $done, $WriteBufSz))
+ != $WriteBufSz ) {
+ print(STDERR "Unable to write to output file\n");
+ exit(1);
+ }
+ $done += $WriteBufSz;
+ }
+ $WriteBuf = substr($$dataRef, $done);
+}
+
+sub TarWritePad
+{
+ my($fh, $size) = @_;
+
+ if ( $size % $tar_header_length ) {
+ my $data = "\0" x ($tar_header_length - ($size % $tar_header_length));
+ TarWrite($fh, \$data);
+ }
+}
+
+sub TarWriteHeader
+{
+ my($fh, $hdr) = @_;
+
+ $hdr->{uname} = UidLookup($hdr->{uid}) if ( !defined($hdr->{uname}) );
+ $hdr->{gname} = GidLookup($hdr->{gid}) if ( !defined($hdr->{gname}) );
+ my $devmajor = defined($hdr->{devmajor}) ? sprintf("%07o", $hdr->{devmajor})
+ : "";
+ my $devminor = defined($hdr->{devminor}) ? sprintf("%07o", $hdr->{devminor})
+ : "";
+ my $data = pack($tar_pack_header,
+ substr($hdr->{name}, 0, 99),
+ sprintf("%07o", $hdr->{mode}),
+ sprintf("%07o", $hdr->{uid}),
+ sprintf("%07o", $hdr->{gid}),
+ sprintf("%011o", $hdr->{size}),
+ sprintf("%011o", $hdr->{mtime}),
+ "", #checksum field - space padded by pack("A8")
+ $hdr->{type},
+ substr($hdr->{linkname}, 0, 99),
+ $hdr->{magic} || 'ustar ',
+ $hdr->{version} || ' ',
+ $hdr->{uname},
+ $hdr->{gname},
+ $devmajor,
+ $devminor,
+ "" # prefix is empty
+ );
+ substr($data, 148, 7) = sprintf("%06o\0", unpack("%16C*",$data));
+ TarWrite($fh, \$data);
+}
+
+sub TarWriteFileInfo
+{
+ my($fh, $hdr) = @_;
+
+ #
+ # Handle long link names (symbolic links)
+ #
+ if ( length($hdr->{linkname}) > 99 ) {
+ my %h;
+ my $data = $hdr->{linkname} . "\0";
+ $h{name} = "././\@LongLink";
+ $h{type} = "K";
+ $h{size} = length($data);
+ TarWriteHeader($fh, \%h);
+ TarWrite($fh, \$data);
+ TarWritePad($fh, length($data));
+ }
+ #
+ # Handle long file names
+ #
+ if ( length($hdr->{name}) > 99 ) {
+ my %h;
+ my $data = $hdr->{name} . "\0";
+ $h{name} = "././\@LongLink";
+ $h{type} = "L";
+ $h{size} = length($data);
+ TarWriteHeader($fh, \%h);
+ TarWrite($fh, \$data);
+ TarWritePad($fh, length($data));
+ }
+ TarWriteHeader($fh, $hdr);
+}
+
+my $Attr;
+my $AttrDir;
+
+sub TarWriteFile
+{
+ my($fh, $fullName, $mangle, $compress, $tarPathOverride) = @_;
+ my($tarPath);
+
+ if ( $fullName =~ m{^\Q$TopDir/pc/$Host/$Num/$ShareName\E(.*)}
+ || (defined($NumF)
+ && $fullName =~ m{^\Q$TopDir/pc/$Host/$NumF/$ShareNameF\E(.*)}) ) {
+ $tarPath = $mangle ? $bpc->fileNameUnmangle($1) : $1;
+ } else {
+ print(STDERR "Unexpected file name from find: $fullName\n");
+ return;
+ }
+ $tarPath = $tarPathOverride if ( defined($tarPathOverride) );
+ (my $dir = $fullName) =~ s{/([^/]*)$}{};
+ my $fileName = $mangle ? $bpc->fileNameUnmangle($1) : $1;
+ if ( $mangle && $AttrDir ne $dir ) {
+ $AttrDir = $dir;
+ $Attr = BackupPC::Attrib->new({ compress => $compress });
+ if ( -f $Attr->fileName($dir) && !$Attr->read($dir) ) {
+ print(STDERR "Can't read attribute file in $dir\n");
+ $ErrorCnt++;
+ $Attr = undef;
+ }
+ }
+ my $hdr;
+ $hdr = $Attr->get($fileName) if ( defined($Attr) );
+ if ( !defined($hdr) ) {
+ #
+ # No attributes. Must be an old style backup. Reconstruct
+ # what we can. Painful part is computing the size if compression
+ # is on: only method is to uncompress the file.
+ #
+ my @s = stat($fullName);
+ $hdr = {
+ type => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE,
+ mode => $s[2],
+ uid => $s[4],
+ gid => $s[5],
+ size => -f _ ? $s[7] : 0,
+ mtime => $s[9],
+ };
+ if ( $compress && -f _ ) {
+ #
+ # Compute the correct size by reading the whole file
+ #
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ if ( !defined($f) ) {
+ print(STDERR "Unable to open file $fullName\n");
+ $ErrorCnt++;
+ return;
+ }
+ my($data, $size);
+ while ( $f->read(\$data, $BufSize) > 0 ) {
+ $size += length($data);
+ }
+ $f->close;
+ $hdr->{size} = $size;
+ }
+ }
+ if ( defined($PathRemove)
+ && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
+ substr($tarPath, 0, length($PathRemove)) = $PathAdd;
+ }
+ $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
+ $tarPath =~ s{//+}{/}g;
+ $hdr->{name} = $tarPath;
+
+ if ( $hdr->{type} == BPC_FTYPE_DIR ) {
+ #
+ # Directory: just write the header
+ #
+ $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
+ TarWriteFileInfo($fh, $hdr);
+ $DirCnt++;
+ } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
+ #
+ # Regular file: write the header and file
+ #
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ if ( !defined($f) ) {
+ print(STDERR "Unable to open file $fullName\n");
+ $ErrorCnt++;
+ return;
+ }
+ TarWriteFileInfo($fh, $hdr);
+ my($data, $size);
+ while ( $f->read(\$data, $BufSize) > 0 ) {
+ TarWrite($fh, \$data);
+ $size += length($data);
+ }
+ $f->close;
+ TarWritePad($fh, $size);
+ $FileCnt++;
+ $ByteCnt += $size;
+ } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
+ #
+ # Hardlink file: either write a hardlink or the complete file
+ # depending upon whether the linked-to file will be written
+ # to the archive.
+ #
+ # Start by reading the contents of the link.
+ #
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ if ( !defined($f) ) {
+ print(STDERR "Unable to open file $fullName\n");
+ $ErrorCnt++;
+ return;
+ }
+ my $data;
+ while ( $f->read(\$data, $BufSize) > 0 ) {
+ $hdr->{linkname} .= $data;
+ }
+ $f->close;
+ #
+ # Check @ARGV and the list of hardlinked files we have explicitly
+ # dumped to see if we have dumped this file or not
+ #
+ my $done = 0;
+ my $name = $hdr->{linkname};
+ $name =~ s{^\./}{/};
+ if ( $HardLinkExtraFiles{$name} ) {
+ $done = 1;
+ } else {
+ foreach my $arg ( @ARGV ) {
+ $arg =~ s{^\./+}{/};
+ $arg =~ s{/+$}{};
+ $done = 1 if ( $name eq $arg || $name =~ /^\Q$arg\E\// );
+ }
+ }
+ if ( $done ) {
+ #
+ # Target file will be or was written, so just remember
+ # the hardlink so we can dump it later.
+ #
+ push(@HardLinks, $hdr);
+ $SpecialCnt++;
+ } else {
+ #
+ # Have to dump the original file. Just call the top-level
+ # routine, so that we save the hassle of dealing with
+ # mangling, merging and attributes.
+ #
+ $HardLinkExtraFiles{$name} = 1;
+ archiveWrite($fh, $hdr->{linkname}, $hdr->{name});
+ }
+ } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
+ #
+ # Symbolic link: read the symbolic link contents into the header
+ # and write the header.
+ #
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ if ( !defined($f) ) {
+ print(STDERR "Unable to open symlink file $fullName\n");
+ $ErrorCnt++;
+ return;
+ }
+ my $data;
+ while ( $f->read(\$data, $BufSize) > 0 ) {
+ $hdr->{linkname} .= $data;
+ }
+ $f->close;
+ $hdr->{size} = 0;
+ TarWriteFileInfo($fh, $hdr);
+ $SpecialCnt++;
+ } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
+ || $hdr->{type} == BPC_FTYPE_BLOCKDEV
+ || $hdr->{type} == BPC_FTYPE_FIFO ) {
+ #
+ # Special files: for char and block special we read the
+ # major and minor numbers from a plain file.
+ #
+ if ( $hdr->{type} != BPC_FTYPE_FIFO ) {
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ my $data;
+ if ( !defined($f) || $f->read(\$data, $BufSize) < 0 ) {
+ print(STDERR "Unable to open/read char/block special file"
+ . " $fullName\n");
+ $f->close if ( defined($f) );
+ $ErrorCnt++;
+ return;
+ }
+ $f->close;
+ if ( $data =~ /(\d+),(\d+)/ ) {
+ $hdr->{devmajor} = $1;
+ $hdr->{devminor} = $2;
+ }
+ }
+ $hdr->{size} = 0;
+ TarWriteFileInfo($fh, $hdr);
+ $SpecialCnt++;
+ } else {
+ print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
+ $ErrorCnt++;
+ }
+}
+
+#
+# Does a recursive find of $dir, filling in from the (filled dump)
+# directory $dirF. Handles the cases where $dir and $dirF might
+# or might not be mangled etc.
+#
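+# When a name exists in both trees, the version in $dir (the more
+# recent dump) wins; names present only in $dirF are filled in from
+# the filled dump. For example, if the incremental contains ./a and
+# the filled dump contains ./a and ./b, the archive gets ./a from
+# the incremental and ./b from the filled dump.
+#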
+sub MergeFind
+{
+ my($fh, $dir, $dirF) = @_;
+
+ my(@Dir, $fLast);
+ if ( -d $dir ) {
+ TarWriteFile($fh, $dir, $Mangle, $Compress);
+ } elsif ( -d $dirF ) {
+ TarWriteFile($fh, $dirF, $MangleF, $CompressF);
+ }
+ if ( opendir(DIR, $dir) ) {
+ @Dir = readdir(DIR);
+ closedir(DIR);
+ }
+ if ( defined($NumF) && opendir(DIR, $dirF) ) {
+ if ( $Mangle == $MangleF ) {
+ @Dir = (@Dir, readdir(DIR));
+ } else {
+ foreach my $f ( readdir(DIR) ) {
+ if ( $Mangle ) {
+ push(@Dir, $bpc->fileNameMangle($f));
+ } else {
+ push(@Dir, $bpc->fileNameUnmangle($f));
+ }
+ }
+ }
+ }
+ foreach my $f ( sort({$a cmp $b} @Dir) ) {
+ next if ( $f eq "." || $f eq ".."
+ || $f eq $fLast || ($Mangle && $f eq "attrib") );
+ $fLast = $f;
+ my($fF) = $f;
+ if ( $Mangle != $MangleF ) {
+ $fF = $Mangle ? $bpc->fileNameUnmangle($f)
+ : $bpc->fileNameMangle($f);
+ }
+ if ( -e "$dir/$f" ) {
+ if ( -d "$dir/$f" ) {
+ MergeFind($fh, "$dir/$f", "$dirF/$fF");
+ } else {
+ TarWriteFile($fh, "$dir/$f", $Mangle, $Compress);
+ }
+ } elsif ( -e "$dirF/$fF" ) {
+ if ( -d "$dirF/$fF" ) {
+ MergeFind($fh, "$dir/$f", "$dirF/$fF");
+ } else {
+ TarWriteFile($fh, "$dirF/$fF", $MangleF, $CompressF);
+ }
+ } else {
+ print(STDERR "$0: Botch on $dir, $dirF, $f, $fF\n");
+ $ErrorCnt++;
+ }
+ }
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_tarExtract: extract data from a dump
+#
+# DESCRIPTION
+# BackupPC_tarExtract reads a tar archive on stdin and extracts it
+# below $TopDir/pc/$host/new, pooling each file's contents via
+# BackupPC::PoolWrite and recording the file attributes.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::Attrib qw(:all);
+use BackupPC::FileZIO;
+use BackupPC::PoolWrite;
+use File::Path;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+if ( @ARGV != 3 ) {
+ print("usage: $0 <host> <shareName> <compressLevel>\n");
+ exit(1);
+}
+if ( $ARGV[0] !~ /^([\w\.-]+)$/ ) {
+ print("$0: bad host name '$ARGV[0]'\n");
+ exit(1);
+}
+my $host = $1;
+if ( $ARGV[1] !~ /^([\w\s\.\/\$-]+)$/ ) {
+ print("$0: bad share name '$ARGV[1]'\n");
+ exit(1);
+}
+my $ShareNameUM = $1;
+my $ShareName = $bpc->fileNameEltMangle($ShareNameUM);
+if ( $ARGV[2] !~ /^(\d+)$/ ) {
+ print("$0: bad compress level '$ARGV[2]'\n");
+ exit(1);
+}
+my $Compress = $1;
+
+#
+# This constant and the line of code below that uses it are borrowed
+# from Archive::Tar. Thanks to Calle Dybedahl and Stephen Zander.
+# See www.cpan.org.
+#
+# Archive::Tar is Copyright 1997 Calle Dybedahl. All rights reserved.
+# Copyright 1998 Stephen Zander. All rights reserved.
+#
+my $tar_unpack_header
+ = 'A100 A8 A8 A8 A12 A12 A8 A1 A100 A6 A2 A32 A32 A8 A8 A155 x12';
+my $tar_header_length = 512;
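+#
+# (The unpack template covers a standard 512-byte header: the sixteen
+# field widths, 100+8+8+8+12+12+8+1+100+6+2+32+32+8+8+155, sum to 500
+# bytes, and the trailing "x12" skips the final 12 bytes of padding.)
+#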
+
+my $BufSize = 1048576; # 1MB or 2^20
+my $MaxFiles = 20;
+my $Errors = 0;
+my $OutDir = "$TopDir/pc/$host/new";
+my %Attrib = ();
+
+my $ExistFileCnt = 0;
+my $ExistFileSize = 0;
+my $ExistFileCompSize = 0;
+my $TotalFileCnt = 0;
+my $TotalFileSize = 0;
+
+sub TarRead
+{
+ my($fh, $totBytes) = @_;
+ my($numBytes, $newBytes, $data);
+
+ $data = "\0" x $totBytes;
+ while ( $numBytes < $totBytes ) {
+ $newBytes = sysread($fh,
+ substr($data, $numBytes, $totBytes - $numBytes),
+ $totBytes - $numBytes);
+ if ( $newBytes <= 0 ) {
+ print(STDERR "Unexpected end of tar archive (tot = $totBytes,"
+ . " num = $numBytes, posn = " . sysseek($fh, 0, 1) . ")\n");
+ $Errors++;
+ return;
+ }
+ $numBytes += $newBytes;
+ }
+ return $data;
+}
+
+sub TarReadHeader
+{
+ my($fh) = @_;
+
+ return $1 if ( TarRead($fh, $tar_header_length) =~ /(.*)/s );
+ return;
+}
+
+sub TarFlush
+{
+ my($fh, $size) = @_;
+
+ if ( $size % $tar_header_length ) {
+ TarRead($fh, $tar_header_length - ($size % $tar_header_length));
+ }
+}
+
+sub TarReadFileInfo
+{
+ my($fh) = @_;
+ my($head, $longName, $longLink);
+ my($name, $mode, $uid, $gid, $size, $mtime, $chksum, $type,
+ $linkname, $magic, $version, $uname, $gname, $devmajor,
+ $devminor, $prefix);
+
+ while ( 1 ) {
+ $head = TarReadHeader($fh);
+ return if ( $head eq "" || $head eq "\0" x $tar_header_length );
+ ($name, # string
+ $mode, # octal number
+ $uid, # octal number
+ $gid, # octal number
+ $size, # octal number
+ $mtime, # octal number
+ $chksum, # octal number
+ $type, # character
+ $linkname, # string
+ $magic, # string
+ $version, # two bytes
+ $uname, # string
+ $gname, # string
+ $devmajor, # octal number
+ $devminor, # octal number
+ $prefix) = unpack($tar_unpack_header, $head);
+
+ $mode = oct $mode;
+ $uid = oct $uid;
+ $gid = oct $gid;
+ $size =~ s/^6/2/; # fix bug in smbclient for >=2GB files
+ $size =~ s/^7/3/; # fix bug in smbclient for >=2GB files
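+ #
+ # (The leading octal digit of the size field comes back with a
+ # spurious high bit set: 6 is 4+2 and 7 is 4+3, so the substitutions
+ # above clear that bit and recover the true size.)
+ #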
+ $size = oct $size;
+ $mtime = oct $mtime;
+ $chksum = oct $chksum;
+ $devmajor = oct $devmajor;
+ $devminor = oct $devminor;
+ $name = "$prefix/$name" if $prefix;
+ $prefix = "";
+ substr ($head, 148, 8) = "        ";   # checksum field counts as spaces
+ if (unpack ("%16C*", $head) != $chksum) {
+ print(STDERR "$name: checksum error at "
+ . sysseek($fh, 0, 1) , "\n");
+ $Errors++;
+ }
+ if ( $type eq "L" ) {
+ $longName = TarRead($fh, $size) || return;
+ # remove trailing NULL
+ $longName = substr($longName, 0, $size - 1);
+ TarFlush($fh, $size);
+ next;
+ } elsif ( $type eq "K" ) {
+ $longLink = TarRead($fh, $size) || return;
+ # remove trailing NULL
+ $longLink = substr($longLink, 0, $size - 1);
+ TarFlush($fh, $size);
+ next;
+ }
+ $name = $longName if ( defined($longName) );
+ $linkname = $longLink if ( defined($longLink) );
+ $name =~ s{^\./+}{};
+ $name =~ s{/+$}{};
+ $name =~ s{//+}{/}g;
+ return {
+ name => $name,
+ mangleName => $bpc->fileNameMangle($name),
+ mode => $mode,
+ uid => $uid,
+ gid => $gid,
+ size => $size,
+ mtime => $mtime,
+ type => $type,
+ linkname => $linkname,
+ devmajor => $devmajor,
+ devminor => $devminor,
+ };
+ }
+}
+
+sub TarReadFile
+{
+ my($fh) = @_;
+ my $f = TarReadFileInfo($fh) || return;
+ my($dir, $file);
+
+ if ( $f->{name} eq "" ) {
+ # top-level dir
+ $dir = "";
+ $file = $ShareNameUM;
+ } else {
+ ($file = $f->{name}) =~ s{.*?([^/]*)$}{$1}; # unmangled file
+ if ( ($dir = $f->{mangleName}) =~ m{(.*)/.*} ) {
+ $dir = "$ShareName/$1";
+ } else {
+ $dir = $ShareName;
+ }
+ }
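+ #
+ # On entering a directory we haven't seen yet, flush the attribute
+ # file of every cached directory that is not an ancestor of the new
+ # one, on the assumption that the tar stream is done with them.
+ #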
+ if ( !defined($Attrib{$dir}) ) {
+ foreach my $d ( keys(%Attrib) ) {
+ next if ( $dir =~ m{^\Q$d/} );
+ attributeWrite($d);
+ }
+ $Attrib{$dir} = BackupPC::Attrib->new({ compress => $Compress });
+ if ( -f $Attrib{$dir}->fileName("$OutDir/$dir")
+ && !$Attrib{$dir}->read("$OutDir/$dir") ) {
+ printf(STDERR "Unable to read attribute file %s\n",
+ $Attrib{$dir}->fileName("$OutDir/$dir"));
+ $Errors++;
+ }
+ }
+ if ( $f->{type} == BPC_FTYPE_DIR ) {
+ #
+ # Directory
+ #
+ mkpath("$OutDir/$ShareName/$f->{mangleName}", 0, 0777)
+ if ( !-d "$OutDir/$ShareName/$f->{mangleName}" );
+ } elsif ( $f->{type} == BPC_FTYPE_FILE ) {
+ #
+ # Regular file
+ #
+ my($nRead);
+ #print("Reading $f->{name}, $f->{size} bytes, type $f->{type}\n");
+ my $poolWrite = BackupPC::PoolWrite->new($bpc,
+ "$OutDir/$ShareName/$f->{mangleName}",
+ $f->{size}, $Compress);
+ while ( $nRead < $f->{size} ) {
+ my $thisRead = $f->{size} - $nRead < $BufSize
+ ? $f->{size} - $nRead : $BufSize;
+ my $data = TarRead($fh, $thisRead);
+ if ( $data eq "" ) {
+ print(STDERR "Unexpected end of tar archive during read\n");
+ $Errors++;
+ return;
+ }
+ $poolWrite->write(\$data);
+ $nRead += $thisRead;
+ }
+ processClose($poolWrite, "$ShareName/$f->{mangleName}", $f->{size});
+ TarFlush($fh, $f->{size});
+ } elsif ( $f->{type} == BPC_FTYPE_HARDLINK ) {
+ #
+ # Hardlink to another file. GNU tar is clever about files
+ # that are hardlinks to each other. The first link will be
+ # sent as a regular file. The additional links will be sent
+ # as this type. We store the hardlink just like a symlink:
+ # the link name (path of the linked-to file) is stored in
+ # a plain file.
+ #
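+ # For example, a hardlink whose target is "path/to/file" is pooled
+ # as a 12-byte plain file containing that string; the attrib entry
+ # saved below records the true type (BPC_FTYPE_HARDLINK).
+ #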
+ $f->{size} = length($f->{linkname});
+ my $poolWrite = BackupPC::PoolWrite->new($bpc,
+ "$OutDir/$ShareName/$f->{mangleName}",
+ $f->{size}, $Compress);
+ $poolWrite->write(\$f->{linkname});
+ processClose($poolWrite, "$ShareName/$f->{mangleName}", $f->{size});
+ } elsif ( $f->{type} == BPC_FTYPE_SYMLINK ) {
+ #
+ # Symbolic link: write the value of the link to a plain file,
+ # that we pool as usual (ie: we don't create a symlink).
+ # The attributes remember the original file type.
+ # We also change the size to reflect the size of the link
+ # contents.
+ #
+ $f->{size} = length($f->{linkname});
+ my $poolWrite = BackupPC::PoolWrite->new($bpc,
+ "$OutDir/$ShareName/$f->{mangleName}",
+ $f->{size}, $Compress);
+ $poolWrite->write(\$f->{linkname});
+ processClose($poolWrite, "$ShareName/$f->{mangleName}", $f->{size});
+ } elsif ( $f->{type} == BPC_FTYPE_CHARDEV
+ || $f->{type} == BPC_FTYPE_BLOCKDEV
+ || $f->{type} == BPC_FTYPE_FIFO ) {
+ #
+ # Special files: for char and block special we write the
+ # major and minor numbers to a plain file, that we pool
+ # as usual. For a pipe file we create an empty file.
+ # The attributes remember the original file type.
+ #
+ my $data;
+ if ( $f->{type} == BPC_FTYPE_FIFO ) {
+ $data = "";
+ } else {
+ $data = "$f->{devmajor},$f->{devminor}";
+ }
+ my $poolWrite = BackupPC::PoolWrite->new($bpc,
+ "$OutDir/$ShareName/$f->{mangleName}",
+ length($data), $Compress);
+ $poolWrite->write(\$data);
+ $f->{size} = length($data);
+ processClose($poolWrite, "$ShareName/$f->{mangleName}", length($data));
+ } else {
+ print("Got unknown type $f->{type} for $f->{name}\n");
+ $Errors++;
+ }
+ $Attrib{$dir}->set($file, {
+ type => $f->{type},
+ mode => $f->{mode},
+ uid => $f->{uid},
+ gid => $f->{gid},
+ size => $f->{size},
+ mtime => $f->{mtime},
+ });
+ return 1;
+}
+
+sub attributeWrite
+{
+ my($d) = @_;
+ my($poolWrite);
+
+ return if ( !defined($Attrib{$d}) );
+ if ( $Attrib{$d}->fileCount ) {
+ my $data = $Attrib{$d}->writeData;
+ my $fileName = $Attrib{$d}->fileName("$OutDir/$d");
+ my $poolWrite = BackupPC::PoolWrite->new($bpc, $fileName,
+ length($data), $Compress);
+ $poolWrite->write(\$data);
+ processClose($poolWrite, $Attrib{$d}->fileName($d), length($data));
+ }
+ delete($Attrib{$d});
+}
+
+sub processClose
+{
+ my($poolWrite, $fileName, $origSize) = @_;
+ my($exists, $digest, $outSize, $errs) = $poolWrite->close;
+
+ if ( @$errs ) {
+ print(STDERR join("", @$errs));
+ $Errors += @$errs;
+ }
+ $TotalFileCnt++;
+ $TotalFileSize += $origSize;
+ if ( $exists ) {
+ $ExistFileCnt++;
+ $ExistFileSize += $origSize;
+ $ExistFileCompSize += $outSize;
+ } elsif ( $outSize > 0 ) {
+ print(NEW_FILES "$digest $origSize $fileName\n");
+ }
+}
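+
+#
+# Each NewFileList line written above ("digest size path") names a file
+# that was not already in the pool, so that BackupPC_link can insert
+# the new files into the pool after the dump finishes.
+#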
+
+mkpath("$OutDir/$ShareName", 0, 0777);
+open(NEW_FILES, ">>$TopDir/pc/$host/NewFileList")
+ || die("can't open $TopDir/pc/$host/NewFileList");
+1 while ( TarReadFile(*STDIN) );
+1 while ( sysread(STDIN, my $discard, 1024) );
+
+#
+# Flush out remaining attributes.
+#
+foreach my $d ( keys(%Attrib) ) {
+ attributeWrite($d);
+}
+close(NEW_FILES);
+
+#
+# Report results to BackupPC_dump
+#
+print("Done: $Errors errors, $ExistFileCnt filesExist,"
+ . " $ExistFileSize sizeExist, $ExistFileCompSize sizeExistComp,"
+ . " $TotalFileCnt filesTotal, $TotalFileSize sizeTotal\n");
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_trashClean: remove all the files in $TopDir/trash.
+#
+# DESCRIPTION
+# BackupPC_trashClean is started once by BackupPC. Every 5 minutes
+# it wakes up and removes all the files or directories in $TopDir/trash.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+$bpc->ChildInit();
+
+###########################################################################
+# Empty trash every so often (eg: every 5 minutes)
+###########################################################################
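+# The "processState ..." lines below go to stdout, which the parent
+# BackupPC daemon uses to track whether this child is running or idle.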
+while ( 1 ) {
+ print("processState running\n");
+ 1 while ( $bpc->RmTreeTrashEmpty("$TopDir/trash") );
+ print("processState idle\n");
+ sleep($Conf{TrashCleanSleepSec} || 300);
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_zcat: uncompress files to stdout
+#
+# DESCRIPTION
+#
+# Usage: BackupPC_zcat [files...]
+#
+# BackupPC_zcat is a command-line utility for uncompressing BackupPC
+# compressed files.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+
+use lib "__INSTALLDIR__/lib";
+use Compress::Zlib;
+use BackupPC::FileZIO;
+
+sub zcat
+{
+ my($fh, $fileName) = @_;
+ my($data, $r);
+
+ while ( ($r = $fh->read(\$data, 65536)) > 0 ) {
+ print($data);
+ }
+ if ( $r < 0 ) {
+ print(STDERR "$0: can't uncompress $fileName\n");
+ }
+ $fh->close();
+}
+
+if ( @ARGV ) {
+ while ( @ARGV ) {
+ if ( defined(my $fh = BackupPC::FileZIO->open($ARGV[0], 0, 1)) ) {
+ zcat($fh, $ARGV[0]);
+ } else {
+ print(STDERR "$0: can't open $ARGV[0]\n");
+ exit(1);
+ }
+ shift @ARGV;
+ }
+} else {
+ my $fh = BackupPC::FileZIO->open(*STDIN, 0, 1);
+ zcat($fh, "stdin");
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_zipCreate: create a zip archive of an existing dump
+# for restore on a client.
+#
+# DESCRIPTION
+#
+# Usage: BackupPC_zipCreate [-t] [-h host] [-n dumpNum] [-s shareName]
+# [-r pathRemove] [-p pathAdd] [-c compressionLevel]
+# files/directories...
+#
+# Flags:
+# Required options:
+#
+# -h host host from which the zip archive is created
+# -n dumpNum dump number from which the zip archive is created
+# -s shareName share name from which the zip archive is created
+#
+# Other options:
+# -t print summary totals
+# -r pathRemove path prefix that will be replaced with pathAdd
+# -p pathAdd new path prefix
+# -c level compression level (default is 0, no compression)
+#
+# The -h, -n and -s options specify which dump is used to generate
+# the zip archive. The -r and -p options can be used to relocate
+# the paths in the zip archive so extracted files can be placed
+# in a location different from their original location.
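+#
+# For example, "-r /home -p /tmp/restore" causes a backed-up file
+# /home/user/file (relative to the share) to appear in the zip archive
+# as ./tmp/restore/user/file.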
+#
+# AUTHOR
+# Guillaume Filion <gfk@users.sourceforge.net>
+# Based on BackupPC_tarCreate by Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2002 Craig Barratt and Guillaume Filion
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use lib "__INSTALLDIR__/lib";
+use Archive::Zip qw(:ERROR_CODES);
+use File::Path;
+use Getopt::Std;
+use IO::Handle;
+use BackupPC::Lib;
+use BackupPC::Attrib qw(:all);
+use BackupPC::FileZIO;
+use BackupPC::Zip::FileMember;
+
+die("BackupPC::Lib->new failed\n") if ( !(my $bpc = BackupPC::Lib->new) );
+my $TopDir = $bpc->TopDir();
+my $BinDir = $bpc->BinDir();
+my %Conf = $bpc->Conf();
+
+my %opts;
+getopts("th:n:p:r:s:c:", \%opts);
+
+if ( @ARGV < 1 ) {
+ print(STDERR "usage: $0 [-t] [-h host] [-n dumpNum] [-s shareName]"
+ . " [-r pathRemove] [-p pathAdd] [-c compressionLevel]"
+ . " files/directories...\n");
+ exit(1);
+}
+
+if ( $opts{h} !~ /^([\w\.-]+)$/ ) {
+ print(STDERR "$0: bad host name '$opts{h}'\n");
+ exit(1);
+}
+my $Host = $opts{h};
+
+if ( $opts{n} !~ /^(\d+)$/ ) {
+ print(STDERR "$0: bad dump number '$opts{n}'\n");
+ exit(1);
+}
+my $Num = $opts{n};
+
+$opts{c} = 0 if ( $opts{c} eq "" );
+if ( $opts{c} !~ /^(\d+)$/ ) {
+ print(STDERR "$0: invalid compression level '$opts{c}'. 0=none, 9=max\n");
+ exit(1);
+}
+my $compLevel = $opts{c};
+
+my @Backups = $bpc->BackupInfoRead($Host);
+my($Compress, $Mangle, $CompressF, $MangleF, $NumF, $i);
+my $FileCnt = 0;
+my $ByteCnt = 0;
+my $DirCnt = 0;
+my $SpecialCnt = 0;
+my $ErrorCnt = 0;
+
+for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ if ( !$Backups[$i]{noFill} ) {
+ #
+ # Remember the most recent filled backup
+ #
+ $NumF = $Backups[$i]{num};
+ $MangleF = $Backups[$i]{mangle};
+ $CompressF = $Backups[$i]{compress};
+ }
+ next if ( $Backups[$i]{num} != $Num );
+ $Compress = $Backups[$i]{compress};
+ $Mangle = $Backups[$i]{mangle};
+ if ( !$Backups[$i]{noFill} ) {
+ # no need to back-fill a filled backup
+ $NumF = $MangleF = $CompressF = undef;
+ }
+ last;
+}
+if ( $i >= @Backups ) {
+ print(STDERR "$0: bad backup number $Num for host $Host\n");
+ exit(1);
+}
+
+my $PathRemove = $1 if ( $opts{r} =~ /(.+)/ );
+my $PathAdd = $1 if ( $opts{p} =~ /(.+)/ );
+if ( $opts{s} !~ /^([\w\s\.\/\$-]+)$/ ) {
+ print(STDERR "$0: bad share name '$opts{s}'\n");
+ exit(1);
+}
+my $ShareNameOrig = $opts{s};
+my $ShareName = $Mangle ? $bpc->fileNameEltMangle($ShareNameOrig)
+ : $ShareNameOrig;
+my $ShareNameF = $MangleF ? $bpc->fileNameEltMangle($ShareNameOrig)
+ : $ShareNameOrig;
+
+my $BufSize = 1048576; # 1MB or 2^20
+my(%UidCache, %GidCache);
+#my $fh = *STDOUT;
+my $fh = new IO::Handle;
+$fh->fdopen(fileno(STDOUT),"w");
+my $zipfh = Archive::Zip->new();
+
+foreach my $dir ( @ARGV ) {
+ archiveWrite($zipfh, $dir);
+}
+
+sub archiveWrite
+{
+ my($zipfh, $dir, $zipPathOverride) = @_;
+ if ( $dir =~ m{(^|/)\.\.(/|$)} || $dir !~ /^(.*)$/ ) {
+ print(STDERR "$0: bad directory '$dir'\n");
+ $ErrorCnt++;
+ return;
+ }
+ (my $DirOrig = $1) =~ s{/+$}{};
+ $DirOrig =~ s{^\.?/+}{};
+ my($Dir, $DirF, $FullPath, $FullPathF);
+ if ( $DirOrig eq "" ) {
+ $Dir = $DirF = "";
+ $FullPath = "$TopDir/pc/$Host/$Num/$ShareName";
+ $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF"
+ if ( defined($NumF) );
+ } else {
+ $Dir = $Mangle ? $bpc->fileNameMangle($DirOrig) : $DirOrig;
+ $DirF = $MangleF ? $bpc->fileNameMangle($DirOrig) : $DirOrig;
+ $FullPath = "$TopDir/pc/$Host/$Num/$ShareName/$Dir";
+ $FullPathF = "$TopDir/pc/$Host/$NumF/$ShareNameF/$DirF"
+ if ( defined($NumF) );
+ }
+ if ( -f $FullPath ) {
+ ZipWriteFile($zipfh, $FullPath, $Mangle, $Compress, $zipPathOverride);
+ } elsif ( -d $FullPath || (defined($NumF) && -d $FullPathF) ) {
+ MergeFind($zipfh, $FullPath, $FullPathF);
+ } elsif ( defined($NumF) && -f $FullPathF ) {
+ ZipWriteFile($zipfh, $FullPathF, $MangleF, $CompressF,
+ $zipPathOverride);
+ } else {
+ print(STDERR "$0: $Host, backup $Num, doesn't have a directory or file"
+ . " $ShareNameOrig/$DirOrig\n");
+ $ErrorCnt++;
+ }
+}
+
+# Create Zip file
+print STDERR "Can't write Zip file\n"
+ unless $zipfh->writeToFileHandle($fh, 0) == Archive::Zip::AZ_OK;
+
+#
+# print out totals if requested
+#
+if ( $opts{t} ) {
+ print STDERR "Done: $FileCnt files, $ByteCnt bytes, $DirCnt dirs,",
+ " $SpecialCnt specials ignored, $ErrorCnt errors\n";
+}
+exit(0);
+
+###########################################################################
+# Subroutines
+###########################################################################
+
+sub UidLookup
+{
+ my($uid) = @_;
+
+ $UidCache{$uid} = (getpwuid($uid))[0] if ( !exists($UidCache{$uid}) );
+ return $UidCache{$uid};
+}
+
+sub GidLookup
+{
+ my($gid) = @_;
+
+ $GidCache{$gid} = (getgrgid($gid))[0] if ( !exists($GidCache{$gid}) );
+ return $GidCache{$gid};
+}
+
+my $Attr;
+my $AttrDir;
+
+sub ZipWriteFile
+{
+ my($zipfh, $fullName, $mangle, $compress, $zipPathOverride) = @_;
+ my($tarPath);
+
+ if ( $fullName =~ m{^\Q$TopDir/pc/$Host/$Num/$ShareName\E(.*)}
+ || (defined($NumF)
+ && $fullName =~ m{^\Q$TopDir/pc/$Host/$NumF/$ShareNameF\E(.*)}) ) {
+ $tarPath = $mangle ? $bpc->fileNameUnmangle($1) : $1;
+ } else {
+ print(STDERR "Unexpected file name from find: $fullName\n");
+ return;
+ }
+ $tarPath = $zipPathOverride if ( defined($zipPathOverride) );
+ (my $dir = $fullName) =~ s{/([^/]*)$}{};
+ my $fileName = $mangle ? $bpc->fileNameUnmangle($1) : $1;
+ if ( $mangle && $AttrDir ne $dir ) {
+ $AttrDir = $dir;
+ $Attr = BackupPC::Attrib->new({ compress => $compress });
+ if ( -f $Attr->fileName($dir) && !$Attr->read($dir) ) {
+ print(STDERR "Can't read attribute file in $dir\n");
+ $ErrorCnt++;
+ $Attr = undef;
+ }
+ }
+ my $hdr = $Attr->get($fileName) if ( defined($Attr) );
+ if ( !defined($hdr) ) {
+ #
+ # No attributes. Must be an old style backup. Reconstruct
+ # what we can. Painful part is computing the size if compression
+ # is on: only method is to uncompress the file.
+ #
+ my @s = stat($fullName);
+ $hdr = {
+ type => -d _ ? BPC_FTYPE_DIR : BPC_FTYPE_FILE,
+ mode => $s[2],
+ uid => $s[4],
+ gid => $s[5],
+ size => -f _ ? $s[7] : 0,
+ mtime => $s[9],
+ };
+ if ( $compress && -f _ ) {
+ #
+ # Compute the correct size by reading the whole file
+ #
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ if ( !defined($f) ) {
+ print(STDERR "Unable to open file $fullName\n");
+ $ErrorCnt++;
+ return;
+ }
+ my($data, $size);
+ while ( $f->read(\$data, $BufSize) > 0 ) {
+ $size += length($data);
+ }
+ $f->close;
+ $hdr->{size} = $size;
+ }
+ }
+ if ( defined($PathRemove)
+ && substr($tarPath, 0, length($PathRemove)) eq $PathRemove ) {
+ substr($tarPath, 0, length($PathRemove)) = $PathAdd;
+ }
+ $tarPath = "./" . $tarPath if ( $tarPath !~ /^\.\// );
+ $tarPath =~ s{//+}{/}g;
+ $hdr->{name} = $tarPath;
+ my $zipmember; # Container to hold the file/directory to zip.
+
+ if ( $hdr->{type} == BPC_FTYPE_DIR ) {
+ #
+ # Directory: just write the header
+ #
+ $hdr->{name} .= "/" if ( $hdr->{name} !~ m{/$} );
+ $zipmember = Archive::Zip::Member->newDirectoryNamed($hdr->{name});
+ $DirCnt++;
+ } elsif ( $hdr->{type} == BPC_FTYPE_FILE ) {
+ #
+ # Regular file: write the header and file
+ #
+ $zipmember = BackupPC::Zip::FileMember->newFromFileNamed(
+ $fullName,
+ $hdr->{name},
+ $hdr->{size},
+ $compress
+ );
+ $FileCnt++;
+ $ByteCnt += $hdr->{size};
+ } elsif ( $hdr->{type} == BPC_FTYPE_HARDLINK ) {
+ #
+ # Hardlink file: not supported by Zip, so just make a copy
+ # of the pointed-to file.
+ #
+ # Start by reading the contents of the link.
+ #
+ my $f = BackupPC::FileZIO->open($fullName, 0, $compress);
+ if ( !defined($f) ) {
+ print(STDERR "Unable to open file $fullName\n");
+ $ErrorCnt++;
+ return;
+ }
+ my $data;
+ while ( $f->read(\$data, $BufSize) > 0 ) {
+ $hdr->{linkname} .= $data;
+ }
+ $f->close;
+ #
+ # Dump the original file. Just call the top-level
+ # routine, so that we save the hassle of dealing with
+ # mangling, merging and attributes.
+ #
+ archiveWrite($zipfh, $hdr->{linkname}, $hdr->{name});
+ } elsif ( $hdr->{type} == BPC_FTYPE_SYMLINK ) {
+ #
+ # Symlinks can't be Zipped. 8(
+ # We could zip the pointed-to dir/file (just like hardlink), but we
+ # have to avoid the infinite-loop case of a symlink pointing to a
+ # directory above us. Ignore for now. Could be a command-line
+ # option later.
+ #
+ $SpecialCnt++;
+ } elsif ( $hdr->{type} == BPC_FTYPE_CHARDEV
+ || $hdr->{type} == BPC_FTYPE_BLOCKDEV
+ || $hdr->{type} == BPC_FTYPE_FIFO ) {
+ #
+ # Special files can't be Zipped. 8(
+ #
+ $SpecialCnt++;
+ } else {
+ print(STDERR "Got unknown type $hdr->{type} for $hdr->{name}\n");
+ $ErrorCnt++;
+ }
+ return if ( !$zipmember );
+
+ # Set the attributes and permissions
+ $zipmember->setLastModFileDateTimeFromUnix($hdr->{mtime});
+ $zipmember->unixFileAttributes($hdr->{mode});
+ # Zip files don't accept uid and gid, so we put them in the comment field.
+ $zipmember->fileComment("uid=".$hdr->{uid}." gid=".$hdr->{gid})
+ if ( $hdr->{uid} || $hdr->{gid} );
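+ # (For example, a file owned by uid 1000, gid 100 gets the member
+ # comment "uid=1000 gid=100".)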
+
+ # Specify the compression level for this member
+ $zipmember->desiredCompressionLevel($compLevel) if ($compLevel =~ /[0-9]/);
+
+ # Finally Zip the member
+ $zipfh->addMember($zipmember);
+}
+
+#
+# Does a recursive find of $dir, filling in from the (filled dump)
+# directory $dirF. Handles the cases where $dir and $dirF might
+# or might not be mangled etc.
+#
+sub MergeFind
+{
+ my($zipfh, $dir, $dirF) = @_;
+
+ my(@Dir, $fLast);
+ if ( -d $dir ) {
+ ZipWriteFile($zipfh, $dir, $Mangle, $Compress);
+ } elsif ( -d $dirF ) {
+ ZipWriteFile($zipfh, $dirF, $MangleF, $CompressF);
+ }
+ if ( opendir(DIR, $dir) ) {
+ @Dir = readdir(DIR);
+ closedir(DIR);
+ }
+ if ( defined($NumF) && opendir(DIR, $dirF) ) {
+ if ( $Mangle == $MangleF ) {
+ @Dir = (@Dir, readdir(DIR));
+ } else {
+ foreach my $f ( readdir(DIR) ) {
+ if ( $Mangle ) {
+ push(@Dir, $bpc->fileNameMangle($f));
+ } else {
+ push(@Dir, $bpc->fileNameUnmangle($f));
+ }
+ }
+ }
+ }
+ foreach my $f ( sort({$a cmp $b} @Dir) ) {
+ next if ( $f eq "." || $f eq ".."
+ || $f eq $fLast || ($Mangle && $f eq "attrib") );
+ $fLast = $f;
+ my($fF) = $f;
+ if ( $Mangle != $MangleF ) {
+ $fF = $Mangle ? $bpc->fileNameUnmangle($f)
+ : $bpc->fileNameMangle($f);
+ }
+ if ( -e "$dir/$f" ) {
+ if ( -d "$dir/$f" ) {
+ MergeFind($zipfh, "$dir/$f", "$dirF/$fF");
+ } else {
+ ZipWriteFile($zipfh, "$dir/$f", $Mangle, $Compress);
+ }
+ } elsif ( -e "$dirF/$fF" ) {
+ if ( -d "$dirF/$fF" ) {
+ MergeFind($zipfh, "$dir/$f", "$dirF/$fF");
+ } else {
+ ZipWriteFile($zipfh, "$dirF/$fF", $MangleF, $CompressF);
+ }
+ } else {
+ print(STDERR "$0: Botch on $dir, $dirF, $f, $fF\n");
+ $ErrorCnt++;
+ }
+ }
+}
--- /dev/null
+#!/bin/perl -T
+#============================================================= -*-perl-*-
+#
+# BackupPC_Admin: Apache/CGI interface for BackupPC.
+#
+# DESCRIPTION
+# BackupPC_Admin provides a flexible web interface for BackupPC.
+# It is a CGI script that runs under Apache.
+#
+# It requires that Apache pass in $ENV{SCRIPT_NAME} and
+# $ENV{REMOTE_USER}. The latter requires .htaccess style
+# authentication. Replace the code below if you are using some other
+# type of authentication, and have a different way of getting the
+# user name.
+#
+# Also, this script needs to run as the BackupPC user. To accomplish
+# this the script is typically installed as setuid to the BackupPC user.
+#
+# AUTHOR
+# Craig Barratt <cbarratt@users.sourceforge.net>
+#
+# COPYRIGHT
+# Copyright (C) 2001 Craig Barratt
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+#========================================================================
+#
+# Version 1.5.0, released 2 Aug 2002.
+#
+# See http://backuppc.sourceforge.net.
+#
+#========================================================================
+
+use strict;
+use CGI;
+use lib "__INSTALLDIR__/lib";
+use BackupPC::Lib;
+use BackupPC::FileZIO;
+use BackupPC::Attrib qw(:all);
+use Data::Dumper;
+
+use vars qw($Cgi %In $MyURL $User %Conf $TopDir $BinDir $bpc);
+use vars qw(%Status %Info %Jobs @BgQueue @UserQueue @CmdQueue
+ %QueueLen %StatusHost);
+use vars qw($Hosts $HostsMTime $ConfigMTime $PrivAdmin);
+use vars qw(%UserEmailInfo $UserEmailInfoMTime %RestoreReq);
+
+$Cgi = new CGI;
+%In = $Cgi->Vars;
+
+#
+# We require that Apache pass in $ENV{SCRIPT_NAME} and $ENV{REMOTE_USER}.
+# The latter requires .htaccess style authentication. Replace this
+# code if you are using some other type of authentication, and have
+# a different way of getting the user name.
+#
+$MyURL = $ENV{SCRIPT_NAME};
+$User = $ENV{REMOTE_USER};
+
+if ( !defined($bpc) ) {
+ ErrorExit("BackupPC::Lib->new failed: check apache error_log\n")
+ if ( !($bpc = BackupPC::Lib->new) );
+ $TopDir = $bpc->TopDir();
+ $BinDir = $bpc->BinDir();
+ %Conf = $bpc->Conf();
+ $ConfigMTime = $bpc->ConfigMTime();
+} elsif ( $bpc->ConfigMTime() != $ConfigMTime ) {
+ $bpc->ConfigRead();
+ %Conf = $bpc->Conf();
+ $ConfigMTime = $bpc->ConfigMTime();
+}
+
+#
+# Clean up %ENV for taint checking
+#
+delete @ENV{qw(IFS CDPATH ENV BASH_ENV)};
+$ENV{PATH} = $Conf{MyPath};
+
+#
+# Verify we are running as the correct user
+#
+if ( $Conf{BackupPCUserVerify}
+ && $> != (my $uid = (getpwnam($Conf{BackupPCUser}))[2]) ) {
+ ErrorExit("Wrong user: my userid is $>, instead of $uid"
+ . " ($Conf{BackupPCUser})\n");
+}
+
+if ( !defined($Hosts) || $bpc->HostsMTime() != $HostsMTime ) {
+ $HostsMTime = $bpc->HostsMTime();
+ $Hosts = $bpc->HostInfoRead();
+}
+
+my %ActionDispatch = (
+ "summary" => \&Action_Summary,
+ "Start Incr Backup" => \&Action_StartStopBackup,
+ "Start Full Backup" => \&Action_StartStopBackup,
+ "Stop/Dequeue Backup" => \&Action_StartStopBackup,
+ "queue" => \&Action_Queue,
+ "view" => \&Action_View,
+ "LOGlist" => \&Action_LOGlist,
+ "emailSummary" => \&Action_EmailSummary,
+ "browse" => \&Action_Browse,
+ "Restore" => \&Action_Restore,
+ "RestoreFile" => \&Action_RestoreFile,
+ "hostInfo" => \&Action_HostInfo,
+ "generalInfo" => \&Action_GeneralInfo,
+ "restoreInfo" => \&Action_RestoreInfo,
+);
+
+#
+# Set default actions, then call sub handler
+#
+$In{action} ||= "hostInfo" if ( defined($In{host}) );
+$In{action} = "generalInfo" if ( !defined($ActionDispatch{$In{action}}) );
+$ActionDispatch{$In{action}}();
+exit(0);
+
+###########################################################################
+# Action handling subroutines
+###########################################################################
+
+sub Action_Summary
+{
+ my($fullTot, $fullSizeTot, $incrTot, $incrSizeTot, $str,
+ $strNone, $strGood, $hostCntGood, $hostCntNone);
+
+ $hostCntGood = $hostCntNone = 0;
+ GetStatusInfo("hosts");
+ my $Privileged = CheckPermission();
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view PC summaries." );
+ }
+ foreach my $host ( sort(keys(%Status)) ) {
+ my($fullDur, $incrCnt, $incrAge, $fullSize, $fullRate);
+ my @Backups = $bpc->BackupInfoRead($host);
+ my $fullCnt = $incrCnt = 0;
+ my $fullAge = $incrAge = -1;
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ if ( $Backups[$i]{type} eq "full" ) {
+ $fullCnt++;
+ if ( $fullAge < 0 || $Backups[$i]{startTime} > $fullAge ) {
+ $fullAge = $Backups[$i]{startTime};
+ $fullSize = $Backups[$i]{size} / (1024 * 1024);
+ $fullDur = $Backups[$i]{endTime} - $Backups[$i]{startTime};
+ }
+ $fullSizeTot += $Backups[$i]{size} / (1024 * 1024);
+ } else {
+ $incrCnt++;
+ if ( $incrAge < 0 || $Backups[$i]{startTime} > $incrAge ) {
+ $incrAge = $Backups[$i]{startTime};
+ }
+ $incrSizeTot += $Backups[$i]{size} / (1024 * 1024);
+ }
+ }
+ if ( $fullAge < 0 ) {
+ $fullAge = "";
+ $fullRate = "";
+ } else {
+ $fullAge = sprintf("%.1f", (time - $fullAge) / (24 * 3600));
+ $fullRate = sprintf("%.2f",
+ $fullSize / ($fullDur <= 0 ? 1 : $fullDur));
+ }
+ if ( $incrAge < 0 ) {
+ $incrAge = "";
+ } else {
+ $incrAge = sprintf("%.1f", (time - $incrAge) / (24 * 3600));
+ }
+ $fullTot += $fullCnt;
+ $incrTot += $incrCnt;
+ $fullSize = sprintf("%.2f", $fullSize / 1000);
+ $str = <<EOF;
+<tr><td> ${HostLink($host)} </td>
+ <td align="center"> ${UserLink($Hosts->{$host}{user})} </td>
+ <td align="center"> $fullCnt </td>
+ <td align="center"> $fullAge </td>
+ <td align="center"> $fullSize </td>
+ <td align="center"> $fullRate </td>
+ <td align="center"> $incrCnt </td>
+ <td align="center"> $incrAge </td>
+ <td align="center"> $Status{$host}{state} </td>
+ <td> $Status{$host}{reason} </td></tr>
+EOF
+ if ( @Backups == 0 ) {
+ $hostCntNone++;
+ $strNone .= $str;
+ } else {
+ $hostCntGood++;
+ $strGood .= $str;
+ }
+ }
+ $fullSizeTot = sprintf("%.2f", $fullSizeTot / 1000);
+ $incrSizeTot = sprintf("%.2f", $incrSizeTot / 1000);
+ my $now = timeStamp2(time);
+
+ Header("BackupPC: Server Summary");
+
+ print <<EOF;
+
+${h1("BackupPC Summary")}
+<p>
+This status was generated at $now.
+<p>
+
+${h2("Hosts with good Backups")}
+<p>
+There are $hostCntGood hosts that have been backed up, for a total of:
+<ul>
+<li> $fullTot full backups of total size ${fullSizeTot}GB
+ (prior to pooling and compression),
+<li> $incrTot incr backups of total size ${incrSizeTot}GB
+ (prior to pooling and compression).
+</ul>
+<table border>
+<tr><td> Host </td>
+ <td align="center"> User </td>
+ <td align="center"> #Full </td>
+ <td align="center"> Full Age/days </td>
+ <td align="center"> Full Size/GB </td>
+ <td align="center"> Speed MB/sec </td>
+ <td align="center"> #Incr </td>
+ <td align="center"> Incr Age/days </td>
+ <td align="center"> State </td>
+ <td align="center"> Last attempt </td></tr>
+$strGood
+</table>
+<p>
+
+${h2("Hosts with no Backups")}
+<p>
+There are $hostCntNone hosts with no backups.
+<p>
+<table border>
+<tr><td> Host </td>
+ <td align="center"> User </td>
+ <td align="center"> #Full </td>
+ <td align="center"> Full Age/days </td>
+ <td align="center"> Full Size/GB </td>
+ <td align="center"> Speed MB/sec </td>
+ <td align="center"> #Incr </td>
+ <td align="center"> Incr Age/days </td>
+ <td align="center"> Current State </td>
+ <td align="center"> Last backup attempt </td></tr>
+$strNone
+</table>
+EOF
+ Trailer();
+}
+
+sub Action_StartStopBackup
+{
+ my($str, $reply);
+ my $start = 1 if ( $In{action} eq "Start Incr Backup"
+ || $In{action} eq "Start Full Backup" );
+ my $doFull = $In{action} eq "Start Full Backup" ? 1 : 0;
+ my $type = $doFull ? "full" : "incremental";
+ my $host = $In{host};
+ my $Privileged = CheckPermission($host);
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can stop or start backups on"
+ . " ${EscapeHTML($host)}.");
+ }
+ ServerConnect();
+
+ if ( $In{doit} ) {
+ if ( $start ) {
+ if ( $Hosts->{$host}{dhcp} ) {
+ $reply = $bpc->ServerMesg("backup $In{hostIP} $host"
+ . " $User $doFull");
+ $str = "Backup requested on DHCP $host ($In{hostIP}) by"
+ . " $User from $ENV{REMOTE_ADDR}";
+ } else {
+ $reply = $bpc->ServerMesg("backup $host $host $User $doFull");
+ $str = "Backup requested on $host by $User";
+ }
+ } else {
+ $reply = $bpc->ServerMesg("stop $host $User $In{backoff}");
+ $str = "Backup stopped/dequeued on $host by $User";
+ }
+ Header("BackupPC: Backup Requested on $host");
+ print <<EOF;
+${h1($str)}
+<p>
+Reply from server was: $reply
+<p>
+Go back to <a href="$MyURL?host=$host">$host home page</a>.
+EOF
+ Trailer();
+ } else {
+ if ( $start ) {
+ my $ipAddr = ConfirmIPAddress($host);
+
+ Header("BackupPC: Start Backup Confirm on $host");
+ print <<EOF;
+${h1("Are you sure?")}
+<p>
+You are about to start a $type backup on $host.
+
+<form action="$MyURL" method="get">
+<input type="hidden" name="host" value="$host">
+<input type="hidden" name="hostIP" value="$ipAddr">
+<input type="hidden" name="doit" value="1">
+Do you really want to do this?
+<input type="submit" value="$In{action}" name="action">
+<input type="submit" value="No" name="">
+</form>
+EOF
+ } else {
+ my $backoff = "";
+ GetStatusInfo("host($host)");
+ if ( $StatusHost{backoffTime} > time ) {
+ $backoff = sprintf("%.1f",
+ ($StatusHost{backoffTime} - time) / 3600);
+ }
+ Header("BackupPC: Stop Backup Confirm on $host");
+ print <<EOF;
+${h1("Are you sure?")}
+<p>
+You are about to stop/dequeue backups on $host.
+
+<form action="$MyURL" method="get">
+<input type="hidden" name="host" value="$host">
+<input type="hidden" name="doit" value="1">
+Also, please don't start another backup for
+<input type="text" name="backoff" size="10" value="$backoff"> hours.
+<p>
+Do you really want to do this?
+<input type="submit" value="$In{action}" name="action">
+<input type="submit" value="No" name="">
+</form>
+EOF
+ }
+ Trailer();
+ }
+}
+
+sub Action_Queue
+{
+ my($strBg, $strUser, $strCmd);
+
+ GetStatusInfo("queues");
+ my $Privileged = CheckPermission();
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view queues." );
+ }
+
+ while ( @BgQueue ) {
+ my $req = pop(@BgQueue);
+ my($reqTime) = timeStamp2($req->{reqTime});
+ $strBg .= <<EOF;
+<tr><td> ${HostLink($req->{host})} </td>
+ <td align="center"> $reqTime </td>
+ <td align="center"> $req->{user} </td></tr>
+EOF
+ }
+ while ( @UserQueue ) {
+ my $req = pop(@UserQueue);
+ my $reqTime = timeStamp2($req->{reqTime});
+ $strUser .= <<EOF;
+<tr><td> ${HostLink($req->{host})} </td>
+ <td align="center"> $reqTime </td>
+ <td align="center"> $req->{user} </td></tr>
+EOF
+ }
+ while ( @CmdQueue ) {
+ my $req = pop(@CmdQueue);
+ my $reqTime = timeStamp2($req->{reqTime});
+ (my $cmd = $req->{cmd}) =~ s/$BinDir\///;
+ $strCmd .= <<EOF;
+<tr><td> ${HostLink($req->{host})} </td>
+ <td align="center"> $reqTime </td>
+ <td align="center"> $req->{user} </td>
+ <td> $cmd </td></tr>
+EOF
+ }
+ Header("BackupPC: Queue Summary");
+ print <<EOF;
+${h1("Backup Queue Summary")}
+<p>
+${h2("User Queue Summary")}
+<p>
+The following user requests are currently queued:
+<table border>
+<tr><td> Host </td>
+ <td> Req Time </td>
+ <td> User </td></tr>
+$strUser
+</table>
+<p>
+
+${h2("Background Queue Summary")}
+<p>
+The following background requests are currently queued:
+<table border>
+<tr><td> Host </td>
+ <td> Req Time </td>
+ <td> User </td></tr>
+$strBg
+</table>
+<p>
+
+${h2("Command Queue Summary")}
+<p>
+The following command requests are currently queued:
+<table border>
+<tr><td> Host </td>
+ <td> Req Time </td>
+ <td> User </td>
+ <td> Command </td></tr>
+$strCmd
+</table>
+EOF
+ Trailer();
+}
+
+sub Action_View
+{
+ my $Privileged = CheckPermission($In{host});
+ my $compress = 0;
+ my $fh;
+ my $host = $In{host};
+ my $num = $In{num};
+ my $type = $In{type};
+ my $linkHosts = 0;
+ my($file, $comment);
+ my $ext = $num ne "" ? ".$num" : "";
+
+ ErrorExit("Invalid number $num") if ( $num ne "" && $num !~ /^\d+$/ );
+ if ( $type eq "XferLOG" ) {
+ $file = "$TopDir/pc/$host/SmbLOG$ext";
+ $file = "$TopDir/pc/$host/XferLOG$ext" if ( !-f $file && !-f "$file.z");
+ } elsif ( $type eq "XferLOGbad" ) {
+ $file = "$TopDir/pc/$host/SmbLOG.bad";
+ $file = "$TopDir/pc/$host/XferLOG.bad" if ( !-f $file && !-f "$file.z");
+ } elsif ( $type eq "XferErrbad" ) {
+ $file = "$TopDir/pc/$host/SmbLOG.bad";
+ $file = "$TopDir/pc/$host/XferLOG.bad" if ( !-f $file && !-f "$file.z");
+ $comment = "(Extracting only Errors)";
+ } elsif ( $type eq "XferErr" ) {
+ $file = "$TopDir/pc/$host/SmbLOG$ext";
+ $file = "$TopDir/pc/$host/XferLOG$ext" if ( !-f $file && !-f "$file.z");
+ $comment = "(Extracting only Errors)";
+ } elsif ( $type eq "RestoreLOG" ) {
+ $file = "$TopDir/pc/$host/RestoreLOG$ext";
+ } elsif ( $type eq "RestoreErr" ) {
+ $file = "$TopDir/pc/$host/RestoreLOG$ext";
+ $comment = "(Extracting only Errors)";
+ } elsif ( $host ne "" && $type eq "config" ) {
+ $file = "$TopDir/pc/$host/config.pl";
+ } elsif ( $type eq "docs" ) {
+ $file = "$BinDir/../doc/BackupPC.html";
+ if ( open(LOG, $file) ) {
+ Header("BackupPC: Documentation");
+ print while ( <LOG> );
+ close(LOG);
+ Trailer();
+ } else {
+ ErrorExit("Unable to open $file: configuration problem?");
+ }
+ return;
+ } elsif ( $type eq "config" ) {
+ $file = "$TopDir/conf/config.pl";
+ } elsif ( $type eq "hosts" ) {
+ $file = "$TopDir/conf/hosts";
+ } elsif ( $host ne "" ) {
+ $file = "$TopDir/pc/$host/LOG$ext";
+ } else {
+ $file = "$TopDir/log/LOG$ext";
+ $linkHosts = 1;
+ }
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view log or config files." );
+ }
+ if ( !-f $file && -f "$file.z" ) {
+ $file .= ".z";
+ $compress = 1;
+ }
+ Header("BackupPC: Log File $file");
+ print <<EOF;
+${h1("Log File $file $comment")}
+<p>
+EOF
+ if ( defined($fh = BackupPC::FileZIO->open($file, 0, $compress)) ) {
+ my $mtimeStr = $bpc->timeStamp((stat($file))[9], 1);
+ print <<EOF;
+Contents of log file <tt>$file</tt>, modified $mtimeStr $comment
+EOF
+ print "<pre>";
+ if ( $type eq "XferErr" || $type eq "XferErrbad"
+ || $type eq "RestoreErr" ) {
+ my $skipped;
+ while ( 1 ) {
+ $_ = $fh->readLine();
+ if ( $_ eq "" ) {
+ print("[ skipped $skipped lines ]\n") if ( $skipped );
+ last;
+ }
+ if ( /smb: \\>/
+ || /^\s*(\d+) \(\s*\d+\.\d kb\/s\) (.*)$/
+ || /^tar: dumped \d+ files/
+ || /^added interface/i
+ || /^restore tar file /i
+ || /^restore directory /i
+ || /^tarmode is now/i
+ || /^Total bytes written/i
+ || /^Domain=/i
+ || /^Getting files newer than/i
+ || /^Output is \/dev\/null/
+ || /^\([\d\.]* kb\/s\) \(average [\d\.]* kb\/s\)$/
+ || /^\s+directory \\/
+ || /^Timezone is/
+ || /^\.\//
+ ) {
+ $skipped++;
+ next;
+ }
+ print("[ skipped $skipped lines ]\n") if ( $skipped );
+ $skipped = 0;
+ print ${EscapeHTML($_)};
+ }
+ } elsif ( $linkHosts ) {
+ while ( 1 ) {
+ $_ = $fh->readLine();
+ last if ( $_ eq "" );
+ my $s = ${EscapeHTML($_)};
+ $s =~ s/\b([\w-]+)\b/defined($Hosts->{$1})
+ ? ${HostLink($1)} : $1/eg;
+ print $s;
+ }
+ } elsif ( $type eq "config" ) {
+ while ( 1 ) {
+ $_ = $fh->readLine();
+ last if ( $_ eq "" );
+ # remove any passwords and user names
+ s/(SmbSharePasswd.*=.*['"]).*(['"])/$1$2/ig;
+ s/(SmbShareUserName.*=.*['"]).*(['"])/$1$2/ig;
+ s/(ServerMesgSecret.*=.*['"]).*(['"])/$1$2/ig;
+ print ${EscapeHTML($_)};
+ }
+ } else {
+ while ( 1 ) {
+ $_ = $fh->readLine();
+ last if ( $_ eq "" );
+ print ${EscapeHTML($_)};
+ }
+ }
+ $fh->close();
+ } else {
+ printf("<pre>\nCan't open log file $file\n");
+ }
+ print <<EOF;
+</pre>
+EOF
+ Trailer();
+}
+
+sub Action_LOGlist
+{
+ my $Privileged = CheckPermission($In{host});
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view log files.");
+ }
+ my $host = $In{host};
+ my($url0, $hdr, $root, $str);
+ if ( $host ne "" ) {
+ $root = "$TopDir/pc/$host/LOG";
+ $url0 = "&host=$host";
+ $hdr = "for host $host";
+ } else {
+ $root = "$TopDir/log/LOG";
+ $url0 = "";
+ $hdr = "";
+ }
+ for ( my $i = -1 ; ; $i++ ) {
+ my $url1 = "";
+ my $file = $root;
+ if ( $i >= 0 ) {
+ $file .= ".$i";
+ $url1 = "&num=$i";
+ }
+ $file .= ".z" if ( !-f $file && -f "$file.z" );
+ last if ( !-f $file );
+ my $mtimeStr = $bpc->timeStamp((stat($file))[9], 1);
+ my $size = (stat($file))[7];
+ $str .= <<EOF;
+<tr><td> <a href="$MyURL?action=view&type=LOG$url0$url1"><tt>$file</tt></a> </td>
+ <td align="right"> $size </td>
+ <td> $mtimeStr </td></tr>
+EOF
+ }
+ Header("BackupPC: Log File History");
+ print <<EOF;
+
+${h1("Log File History $hdr")}
+<p>
+<table border>
+<tr><td align="center"> File </td>
+ <td align="center"> Size </td>
+ <td align="center"> Modification time </td></tr>
+$str
+</table>
+EOF
+ Trailer();
+}
+
+sub Action_EmailSummary
+{
+ my $Privileged = CheckPermission();
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view email summaries." );
+ }
+ GetStatusInfo("hosts");
+ ReadUserEmailInfo();
+ my(%EmailStr, $str);
+ foreach my $u ( keys(%UserEmailInfo) ) {
+ next if ( !defined($UserEmailInfo{$u}{lastTime}) );
+ my $emailTimeStr = timeStamp2($UserEmailInfo{$u}{lastTime});
+ $EmailStr{$UserEmailInfo{$u}{lastTime}} .= <<EOF;
+<tr><td>${UserLink($u)} </td>
+ <td>${HostLink($UserEmailInfo{$u}{lastHost})} </td>
+ <td>$emailTimeStr </td>
+ <td>$UserEmailInfo{$u}{lastSubj} </td></tr>
+EOF
+ }
+ foreach my $t ( sort({$b <=> $a} keys(%EmailStr)) ) {
+ $str .= $EmailStr{$t};
+ }
+ Header("BackupPC: Email Summary");
+ print <<EOF;
+${h1("Recent Email Summary (Reverse time order)")}
+<p>
+<table border>
+<tr><td align="center"> Recipient </td>
+ <td align="center"> Host </td>
+ <td align="center"> Time </td>
+ <td align="center"> Subject </td></tr>
+$str
+</table>
+EOF
+ Trailer();
+}
+
+sub Action_Browse
+{
+ my $Privileged = CheckPermission($In{host});
+ my($i, $dirStr, $fileStr, $mangle);
+ my($numF, $compressF, $mangleF, $fullDirF);
+ my $checkBoxCnt = 0; # checkbox counter
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can browse backup files"
+ . " for host ${EscapeHTML($In{host})}." );
+ }
+ my $host = $In{host};
+ my $num = $In{num};
+ my $dir = $In{dir};
+ if ( $host eq "" ) {
+ ErrorExit("Empty host name.");
+ }
+ #
+ # Find the requested backup and the previous filled backup
+ #
+ my @Backups = $bpc->BackupInfoRead($host);
+ for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ if ( !$Backups[$i]{noFill} ) {
+ $numF = $Backups[$i]{num};
+ $mangleF = $Backups[$i]{mangle};
+ $compressF = $Backups[$i]{compress};
+ }
+ last if ( $Backups[$i]{num} == $num );
+ }
+ if ( $i >= @Backups ) {
+ ErrorExit("Backup number $num for host ${EscapeHTML($host)} does"
+ . " not exist.");
+ }
+ if ( !$Backups[$i]{noFill} ) {
+ # no need to back-fill a filled backup
+ $numF = $mangleF = $compressF = undef;
+ }
+ my $backupTime = timeStamp2($Backups[$i]{startTime});
+ my $backupAge = sprintf("%.1f", (time - $Backups[$i]{startTime})
+ / (24 * 3600));
+ $mangle = $Backups[$i]{mangle};
+ if ( $dir eq "" || $dir eq "." || $dir eq ".." ) {
+ if ( !opendir(DIR, "$TopDir/pc/$host/$num") ) {
+ ErrorExit("Can't browse bad directory name"
+ . " ${EscapeHTML(\"$TopDir/pc/$host/$num\")}");
+ }
+ #
+ # Read this directory and find the first directory
+ #
+ foreach my $f ( readdir(DIR) ) {
+ next if ( $f eq "." || $f eq ".." );
+ if ( -d "$TopDir/pc/$host/$num/$f" ) {
+ $dir = "/$f";
+ last;
+ }
+ }
+ closedir(DIR);
+ if ( $dir eq "" || $dir eq "." || $dir eq ".." ) {
+ ErrorExit("Directory ${EscapeHTML(\"$TopDir/pc/$host/$num\")}"
+ . " is empty");
+ }
+ }
+ my $relDir = $dir;
+ my $fullDir = "$TopDir/pc/$host/$num/$relDir";
+ if ( defined($numF) ) {
+ # get full path to filled backup
+ if ( $mangle && !$mangleF ) {
+ $fullDirF = "$TopDir/pc/$host/$numF/"
+ . $bpc->fileNameUnmangle($relDir);
+ } else {
+ $fullDirF = "$TopDir/pc/$host/$numF/$relDir";
+ }
+ }
+ my $currDir = undef;
+ #
+ # Read attributes for the directory and optionally for the filled backup
+ #
+ my $attr = BackupPC::Attrib->new({ compress => $Backups[$i]{compress}});
+ my $attrF = BackupPC::Attrib->new({ compress => $compressF})
+ if ( defined($numF) );
+ $attr->read($fullDir) if ( -f $attr->fileName($fullDir) );
+ if ( defined($numF) && -f $attrF->fileName($fullDirF)
+ && $attrF->read($fullDirF) ) {
+ $attr->merge($attrF);
+ }
+ #
+ # Loop up the directory tree until we hit the top.
+ #
+ my(@DirStrPrev);
+ while ( 1 ) {
+ my($fLast, $fum, $fLastum, @DirStr);
+
+ if ( $fullDir =~ m{(^|/)\.\.(/|$)} || !opendir(DIR, $fullDir) ) {
+ ErrorExit("Can't browse bad directory name"
+ . " ${EscapeHTML($fullDir)}");
+ }
+ #
+ # Read this directory and optionally the corresponding filled directory
+ #
+ my @Dir = readdir(DIR);
+ closedir(DIR);
+ if ( defined($numF) && opendir(DIR, $fullDirF) ) {
+ if ( $mangle == $mangleF ) {
+ @Dir = (@Dir, readdir(DIR));
+ } else {
+ foreach my $f ( readdir(DIR) ) {
+ next if ( $f eq "." || $f eq ".." );
+ push(@Dir, $bpc->fileNameMangle($f));
+ }
+ }
+ closedir(DIR);
+ }
+ my $fileCnt = 0; # file counter
+ $fLast = $dirStr = "";
+ #
+ # Loop over each of the files in this directory
+ #
+ my(@DirUniq);
+ foreach my $f ( sort({uc($a) cmp uc($b)} @Dir) ) {
+ next if ( $f eq "." || $f eq ".."
+ || $f eq $fLast || ($mangle && $f eq "attrib") );
+ $fLast = $f;
+ push(@DirUniq, $f);
+ }
+ while ( defined(my $f = shift(@DirUniq)) ) {
+ my $path = "$relDir/$f";
+ my($dirOpen, $gotDir, $imgStr, $img);
+ $fum = $mangle ? $bpc->fileNameUnmangle($f) : $f; # unmangled $f
+ my $fumURI = $fum; # URI escaped $f
+ $path =~ s{^/+}{/};
+ $path =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg;
+ $fumURI =~ s/([^\w.\/-])/uc sprintf("%%%02x", ord($1))/eg;
+ $dirOpen = 1 if ( defined($currDir) && $f eq $currDir );
+ if ( -d "$fullDir/$f" ) {
+ #
+ # Display directory if it exists in current backup.
+ # First find out if there are subdirs
+ #
+ my @s = (defined($numF) && -d "$fullDirF/$f")
+ ? stat("$fullDirF/$f")
+ : stat("$fullDir/$f");
+ my($bold, $unbold, $BGcolor);
+ $img |= 1 << 6;
+ $img |= 1 << 5 if ( $s[3] > 2 );
+ if ( $dirOpen ) {
+ $bold = "<b>";
+ $unbold = "</b>";
+ $img |= 1 << 2;
+ $img |= 1 << 3 if ( $s[3] > 2 );
+ }
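+ #
+ # The accumulated bits, rendered in binary by the "%07b" format,
+ # pick the tree-branch image: e.g. $img == 0b1100100 selects
+ # "1100100.gif" from $Conf{CgiImageDirURL}.
+ #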
+ my $imgFileName = sprintf("%07b.gif", $img);
+ $imgStr = "<img src=\"$Conf{CgiImageDirURL}/$imgFileName\" align=\"absmiddle\" width=\"9\" height=\"19\" border=\"0\">";
+ if ( "$relDir/$f" eq $dir ) {
+ $BGcolor = " bgcolor=\"$Conf{CgiHeaderBgColor}\"";
+ } else {
+ $BGcolor = "";
+ }
+ my $dirName = $fum;
+ $dirName =~ s/ /&nbsp;/g;
+ push(@DirStr, {needTick => 1,
+ tdArgs => $BGcolor,
+ link => <<EOF});
+<a href="$MyURL?action=browse&host=$host&num=$num&dir=$path">$imgStr</a><a href="$MyURL?action=browse&host=$host&num=$num&dir=$path" style="font-size:13px;font-family:arial;text-decoration:none;line-height:15px"> $bold$dirName$unbold</a></td></tr>
+EOF
+ $fileCnt++;
+ $gotDir = 1;
+ if ( $dirOpen ) {
+ my($lastTick, $doneLastTick);
+ foreach my $d ( @DirStrPrev ) {
+ $lastTick = $d if ( $d->{needTick} );
+ }
+ $doneLastTick = 1 if ( !defined($lastTick) );
+ foreach my $d ( @DirStrPrev ) {
+ $img = 0;
+ if ( $d->{needTick} ) {
+ $img |= 1 << 0;
+ }
+ if ( $d == $lastTick ) {
+ $img |= 1 << 4;
+ $doneLastTick = 1;
+ } elsif ( !$doneLastTick ) {
+ $img |= 1 << 3 | 1 << 4;
+ }
+ my $imgFileName = sprintf("%07b.gif", $img);
+ $imgStr = "<img src=\"$Conf{CgiImageDirURL}/$imgFileName\" align=\"absmiddle\" width=\"9\" height=\"19\" border=\"0\">";
+ push(@DirStr, {needTick => 0,
+ tdArgs => $d->{tdArgs},
+ link => $imgStr . $d->{link}
+ });
+ }
+ }
+ }
+ if ( $relDir eq $dir ) {
+ #
+ # This is the selected directory, so display all the files
+ #
+ my $attrStr;
+ if ( defined($a = $attr->get($fum)) ) {
+ my $mtimeStr = $bpc->timeStamp($a->{mtime});
+ my $typeStr = $attr->fileType2Text($a->{type});
+ my $modeStr = sprintf("0%o", $a->{mode} & 07777);
+ $attrStr .= <<EOF;
+ <td align="center">$typeStr</td>
+ <td align="right">$modeStr</td>
+ <td align="right">$a->{size}</td>
+ <td align="right">$mtimeStr</td>
+</tr>
+EOF
+ } else {
+ $attrStr .= "<td colspan=\"4\" align=\"center\"> </td>\n";
+ }
+ if ( $gotDir ) {
+ $fileStr .= <<EOF;
+<tr bgcolor="#ffffcc"><td><input type="checkbox" name="fcb$checkBoxCnt" value="$path"> <a href="$MyURL?action=browse&host=$host&num=$num&dir=$path">${EscapeHTML($fum)}</a></td>
+$attrStr
+</tr>
+EOF
+ } else {
+ $fileStr .= <<EOF;
+<tr bgcolor="#ffffcc"><td><input type="checkbox" name="fcb$checkBoxCnt" value="$path"> <a href="$MyURL?action=RestoreFile&host=$host&num=$num&dir=$path">${EscapeHTML($fum)}</a></td>
+$attrStr
+</tr>
+EOF
+ }
+ $checkBoxCnt++;
+ }
+ }
+ @DirStrPrev = @DirStr;
+ last if ( $relDir eq "" );
+ #
+ # Prune the last directory off $relDir
+ #
+ $relDir =~ s/(.*)\/(.*)/$1/;
+ $currDir = $2;
+ $fullDir = "$TopDir/pc/$host/$num/$relDir";
+ $fullDirF = "$TopDir/pc/$host/$numF/$relDir" if ( defined($numF) );
+ }
+ my $dirDisplay = $mangle ? $bpc->fileNameUnmangle($dir) : $dir;
+ $dirDisplay =~ s{//}{/}g;
+ my $filledBackup;
+ if ( defined($numF) ) {
+ $filledBackup = <<EOF;
+<li> This display is merged with backup #$numF, the most recent prior
+ filled (full) dump.
+EOF
+ }
+ Header("BackupPC: Browse backup $num for $host");
+
+ foreach my $d ( @DirStrPrev ) {
+ $dirStr .= "<tr><td$d->{tdArgs}>$d->{link}\n";
+ }
+
+ ### hide checkall button if there are no files
+ my ($topCheckAll, $checkAll, $fileHeader);
+ if ( $fileStr ) {
+ $fileHeader = <<EOF;
+ <tr bgcolor="$Conf{CgiHeaderBgColor}"><td align=center> Name</td>
+ <td align="center"> Type</td>
+ <td align="center"> Mode</td>
+ <td align="center"> Size</td>
+ <td align="center"> Mod time</td>
+ </tr>
+EOF
+ $checkAll = <<EOF;
+<tr bgcolor="#ffffcc"><td>
+<input type="checkbox" name="allFiles" onClick="return checkAll('allFiles');"> Select all
+</td><td colspan="4" align="center">
+<input type="submit" name="Submit" value="Restore selected files">
+</td></tr>
+EOF
+ # and put a checkall box on top if there are at least 20 files
+ if ( $checkBoxCnt >= 20 ) {
+ $topCheckAll = $checkAll;
+ $topCheckAll =~ s{allFiles}{allFilestop}g;
+ }
+ } else {
+ $fileStr = <<EOF;
+<tr><td bgcolor="#ffffff">The directory ${EscapeHTML($dirDisplay)} is empty
+</td></tr>
+EOF
+ }
+
+ print <<EOF;
+${h1("Backup browse for $host")}
+
+<script language="javascript" type="text/javascript">
+<!--
+
+ function checkAll(location)
+ {
+ for (var i=0;i<document.form1.elements.length;i++)
+ {
+ var e = document.form1.elements[i];
+ if ((e.checked || !e.checked) && e.name != 'all') {
+ if (eval("document.form1."+location+".checked")) {
+ e.checked = true;
+ } else {
+ e.checked = false;
+ }
+ }
+ }
+ }
+
+ function toggleThis(checkbox)
+ {
+ var cb = eval("document.form1."+checkbox);
+ cb.checked = !cb.checked;
+ }
+
+//-->
+</script>
+
+<ul>
+<li> You are browsing backup #$num, which started around $backupTime
+ ($backupAge days ago),
+$filledBackup
+<li> Click on a directory below to navigate into that directory,
+<li> Click on a file below to restore that file.
+</ul>
+
+${h2("Contents of ${EscapeHTML($dirDisplay)}")}
+<form name="form1" method="post" action="$MyURL">
+<input type="hidden" name="num" value="$num">
+<input type="hidden" name="host" value="$host">
+<input type="hidden" name="fcbMax" value="$checkBoxCnt">
+<input type="hidden" name="action" value="Restore">
+<br>
+<table>
+<tr><td valign="top">
+ <!--Navigate here:-->
+ <br><table align="center" border="0" cellpadding="0" cellspacing="0" bgcolor="#ffffff">
+ $dirStr
+ </table>
+</td><td width="3%">
+</td><td valign="top">
+ <!--Restore files here:-->
+ <br>
+ <table cellpadding="0" cellspacing="0" bgcolor="#333333"><tr><td>
+ <table border="0" width="100%" align="left" cellpadding="2" cellspacing="1">
+ $fileHeader
+ $topCheckAll
+ $fileStr
+ $checkAll
+ </table>
+ </td></tr></table>
+<br>
+<!--
+This is now in the checkAll row
+<input type="submit" name="Submit" value="Restore selected files">
+-->
+</td></tr></table>
+</form>
+EOF
+ Trailer();
+}
+
+sub Action_Restore
+{
+ my($str, $reply, $i);
+ my $Privileged = CheckPermission($In{host});
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can restore backup files"
+ . " for host ${EscapeHTML($In{host})}." );
+ }
+ my $host = $In{host};
+ my $num = $In{num};
+ my(@fileList, $fileListStr, $hiddenStr, $share, $pathHdr, $badFileCnt);
+ my @Backups = $bpc->BackupInfoRead($host);
+ for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ last if ( $Backups[$i]{num} == $num );
+ }
+ my $mangle = $Backups[$i]{mangle};
+ ServerConnect();
+
+ if ( !defined($Hosts->{$host}) ) {
+ ErrorExit("Bad host name ${EscapeHTML($host)}");
+ }
+ for ( my $i = 0 ; $i < $In{fcbMax} ; $i++ ) {
+ next if ( !defined($In{"fcb$i"}) );
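+ #
+ # Undo the %XX URI encoding of the file name and reject any
+ # name with a ".." path component.
+ #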
+ (my $name = $In{"fcb$i"}) =~ s/%([0-9A-F]{2})/chr(hex($1))/eg;
+ $badFileCnt++ if ( $name =~ m{(^|/)\.\.(/|$)} );
+ if ( $name =~ m{^/+(.*?)(/.*)} ) {
+ $share = $1;
+ $name = $mangle ? $bpc->fileNameUnmangle($2) : $2;
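+ #
+ # Maintain $pathHdr as the longest common path prefix of all
+ # the selected files by trimming it back a directory at a time.
+ #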
+ if ( @fileList == 0 ) {
+ $pathHdr = $name;
+ } else {
+ while ( substr($name, 0, length($pathHdr)) ne $pathHdr ) {
+ $pathHdr = substr($pathHdr, 0, rindex($pathHdr, "/"));
+ }
+ }
+ }
+ push(@fileList, $name);
+ $share = $mangle ? $bpc->fileNameUnmangle($share) : $share;
+ $hiddenStr .= <<EOF;
+<input type="hidden" name="fcb$i" value="$In{'fcb' . $i}">
+EOF
+ $fileListStr .= <<EOF;
+<li> ${EscapeHTML($name)}
+EOF
+ }
+ $hiddenStr .= "<input type=\"hidden\" name=\"fcbMax\" value=\"$In{fcbMax}\">\n";
+ $badFileCnt++ if ( $In{pathHdr} =~ m{(^|/)\.\.(/|$)} );
+ $badFileCnt++ if ( $In{num} =~ m{(^|/)\.\.(/|$)} );
+ if ( @fileList == 0 ) {
+ ErrorExit("You haven't selected any files; please go Back to"
+ . " select some files.");
+ }
+ if ( $badFileCnt ) {
+ ErrorExit("Nice try, but you can't put '..' in any of the file names");
+ }
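+ #
+ # With a single selection $pathHdr is that file's path; strip
+ # the file name to leave its parent directory.
+ #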
+ if ( @fileList == 1 ) {
+ $pathHdr =~ s/(.*)\/.*/$1/;
+ }
+ $pathHdr = "/" if ( $pathHdr eq "" );
+ if ( $In{type} != 0 && @fileList == $In{fcbMax} ) {
+ #
+ # All the files in the list were selected, so just restore the
+ # entire parent directory
+ #
+ @fileList = ( $pathHdr );
+ }
+ if ( $In{type} == 0 ) {
+ #
+ # Tell the user what options they have
+ #
+ Header("BackupPC: Restore Options for $host");
+ print <<EOF;
+${h1("Restore Options for $host")}
+<p>
+You have selected the following files/directories from
+share $share, backup number #$num:
+<ul>
+$fileListStr
+</ul>
+<p>
+You have three choices for restoring these files/directories.
+Please select one of the following options.
+<p>
+${h2("Option 1: Direct Restore")}
+<p>
+You can start a restore that will restore these files directly onto
+$host.
+<p>
+<b>Warning:</b> any existing files that match the ones you have
+selected will be overwritten!
+
+<form action="$MyURL" method="post">
+<input type="hidden" name="host" value="${EscapeHTML($host)}">
+<input type="hidden" name="num" value="$num">
+<input type="hidden" name="type" value="3">
+$hiddenStr
+<input type="hidden" value="$In{action}" name="action">
+<table border="0">
+<tr>
+ <td>Restore the files to host</td>
+ <td><input type="text" size="40" value="${EscapeHTML($host)}"
+ name="hostDest"></td>
+</tr><tr>
+ <td>Restore the files to share</td>
+ <td><input type="text" size="40" value="${EscapeHTML($share)}"
+ name="shareDest"></td>
+</tr><tr>
+ <td>Restore the files below dir<br>(relative to share)</td>
+ <td valign="top"><input type="text" size="40" maxlength="256"
+ value="${EscapeHTML($pathHdr)}" name="pathHdr"></td>
+</tr><tr>
+ <td><input type="submit" value="Start Restore" name=""></td>
+</table>
+</form>
+EOF
+
+ #
+ # Verify that Archive::Zip is available before showing the
+ # zip restore option
+ #
+ if ( eval { require Archive::Zip } ) {
+ print <<EOF;
+
+${h2("Option 2: Download Zip archive")}
+<p>
+You can download a Zip archive containing all the files/directories you have
+selected. You can then use a local application, such as WinZip,
+to view or extract any of the files.
+<p>
+<b>Warning:</b> depending upon which files/directories you have selected,
+this archive might be very large. It might take many minutes to
+create and transfer the archive, and you will need enough local disk
+space to store it.
+<p>
+<form action="$MyURL" method="post">
+<input type="hidden" name="host" value="${EscapeHTML($host)}">
+<input type="hidden" name="num" value="$num">
+<input type="hidden" name="type" value="2">
+$hiddenStr
+<input type="hidden" value="$In{action}" name="action">
+<input type="checkbox" value="1" name="relative" checked> Make archive relative
+to ${EscapeHTML($pathHdr eq "" ? "/" : $pathHdr)}
+(otherwise archive will contain full paths).
+<br>
+Compression (0=off, 1=fast,...,9=best)
+<input type="text" size="6" value="5" name="compressLevel">
+<br>
+<input type="submit" value="Download Zip File" name="">
+</form>
+EOF
+ } else {
+ print <<EOF;
+
+${h2("Option 2: Download Zip archive")}
+<p>
+You could download a zip archive, but Archive::Zip is not installed.
+Please ask your system administrator to install Archive::Zip from
+<a href="http://www.cpan.org">www.cpan.org</a>.
+<p>
+EOF
+ }
+ print <<EOF;
+${h2("Option 3: Download Tar archive")}
+<p>
+You can download a Tar archive containing all the files/directories you
+have selected. You can then use a local application, such as tar or
+WinZip, to view or extract any of the files.
+<p>
+<b>Warning:</b> depending upon which files/directories you have selected,
+this archive might be very large. It might take many minutes to
+create and transfer the archive, and you will need enough local disk
+space to store it.
+<p>
+<form action="$MyURL" method="post">
+<input type="hidden" name="host" value="${EscapeHTML($host)}">
+<input type="hidden" name="num" value="$num">
+<input type="hidden" name="type" value="1">
+$hiddenStr
+<input type="hidden" value="$In{action}" name="action">
+<input type="checkbox" value="1" name="relative" checked> Make archive relative
+to ${EscapeHTML($pathHdr eq "" ? "/" : $pathHdr)}
+(otherwise archive will contain full paths).
+<br>
+<input type="submit" value="Download Tar File" name="">
+</form>
+EOF
+ Trailer();
+ } elsif ( $In{type} == 1 ) {
+ #
+ # Provide the selected files via a tar archive.
+ #
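+ # Ignore SIGCHLD so the forked child below is reaped automatically.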
+ $SIG{CHLD} = 'IGNORE';
+ my $pid = fork();
+ if ( !defined($pid) ) {
+ $bpc->ServerMesg("log Can't fork for tar restore request by $User");
+ ErrorExit("Can't fork for tar restore");
+ }
+ if ( $pid ) {
+ #
+ # This is the parent.
+ #
+ my @fileListTrim = @fileList;
+ if ( @fileListTrim > 10 ) {
+ @fileListTrim = (@fileListTrim[0..9], '...');
+ }
+ $bpc->ServerMesg("log User $User downloaded tar archive for $host,"
+ . " backup $num; files were: "
+ . join(", ", @fileListTrim));
+ return;
+ }
+ #
+ # This is the child. Print the headers and run BackupPC_tarCreate.
+ #
+ my @pathOpts;
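+ # If a relative archive was requested, have BackupPC_tarCreate
+ # strip the common prefix: -r removes $pathHdr from each path
+ # and -p substitutes an empty prefix.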
+ if ( $In{relative} ) {
+ @pathOpts = ("-r", $pathHdr, "-p", "");
+ }
+ $bpc->ServerDisconnect();
+ print "Content-Type: application/x-gtar\n";
+ print "Content-Transfer-Encoding: binary\n";
+ print "Content-Disposition: attachment; filename=\"restore.tar\"\n\n";
+ exec("$BinDir/BackupPC_tarCreate",
+ "-h", $host,
+ "-n", $num,
+ "-s", $share,
+ @pathOpts,
+ @fileList
+ );
+ } elsif ( $In{type} == 2 ) {
+ #
+ # Provide the selected files via a zip archive.
+ #
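+ # As with tar, ignore SIGCHLD so the forked child is reaped
+ # automatically.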
+ $SIG{CHLD} = 'IGNORE';
+ my $pid = fork();
+ if ( !defined($pid) ) {
+ $bpc->ServerMesg("log Can't fork for zip restore request by $User");
+ ErrorExit("Can't fork for zip restore");
+ }
+ if ( $pid ) {
+ #
+ # This is the parent.
+ #
+ my @fileListTrim = @fileList;
+ if ( @fileListTrim > 10 ) {
+ @fileListTrim = (@fileListTrim[0..9], '...');
+ }
+ $bpc->ServerMesg("log User $User downloaded zip archive for $host,"
+ . " backup $num; files were: "
+ . join(", ", @fileListTrim));
+ return;
+ }
+ #
+ # This is the child. Print the headers and run BackupPC_zipCreate.
+ #
+ my @pathOpts;
+ if ( $In{relative} ) {
+ @pathOpts = ("-r", $pathHdr, "-p", "");
+ }
+ $bpc->ServerDisconnect();
+ print "Content-Type: application/zip\n";
+ print "Content-Transfer-Encoding: binary\n";
+ print "Content-Disposition: attachment; filename=\"restore.zip\"\n\n";
+ $In{compressLevel} = 5 if ( $In{compressLevel} !~ /^\d+$/ );
+ exec("$BinDir/BackupPC_zipCreate",
+ "-h", $host,
+ "-n", $num,
+ "-c", $In{compressLevel},
+ "-s", $share,
+ @pathOpts,
+ @fileList
+ );
+ } elsif ( $In{type} == 3 ) {
+ #
+ # Do restore directly onto host
+ #
+ if ( !defined($Hosts->{$In{hostDest}}) ) {
+ ErrorExit("Host ${EscapeHTML($In{hostDest})} doesn't exist");
+ }
+ if ( !CheckPermission($In{hostDest}) ) {
+ ErrorExit("You don't have permission to restore onto host"
+ . " ${EscapeHTML($In{hostDest})}");
+ }
+ $fileListStr = "";
+ foreach my $f ( @fileList ) {
+ my $targetFile = $f;
+ (my $strippedShare = $share) =~ s/^\///;
+ (my $strippedShareDest = $In{shareDest}) =~ s/^\///;
+ substr($targetFile, 0, length($pathHdr)) = $In{pathHdr};
+ $fileListStr .= <<EOF;
+<tr><td>$host:/$strippedShare$f</td><td>$In{hostDest}:/$strippedShareDest$targetFile</td></tr>
+EOF
+ }
+ Header("BackupPC: Restore Confirm on $host");
+ print <<EOF;
+${h1("Are you sure?")}
+<p>
+You are about to start a restore directly to the machine $In{hostDest}.
+The following files will be restored to share $In{shareDest}, from
+backup number $num:
+<p>
+<table border>
+<tr><td>Original file/dir</td><td>Will be restored to</td></tr>
+$fileListStr
+</table>
+
+<form action="$MyURL" method="post">
+<input type="hidden" name="host" value="${EscapeHTML($host)}">
+<input type="hidden" name="hostDest" value="${EscapeHTML($In{hostDest})}">
+<input type="hidden" name="shareDest" value="${EscapeHTML($In{shareDest})}">
+<input type="hidden" name="pathHdr" value="${EscapeHTML($In{pathHdr})}">
+<input type="hidden" name="num" value="$num">
+<input type="hidden" name="type" value="4">
+$hiddenStr
+Do you really want to do this?
+<input type="submit" value="$In{action}" name="action">
+<input type="submit" value="No" name="">
+</form>
+EOF
+ Trailer();
+ } elsif ( $In{type} == 4 ) {
+ if ( !defined($Hosts->{$In{hostDest}}) ) {
+ ErrorExit("Host ${EscapeHTML($In{hostDest})} doesn't exist");
+ }
+ if ( !CheckPermission($In{hostDest}) ) {
+ ErrorExit("You don't have permission to restore onto host"
+ . " ${EscapeHTML($In{hostDest})}");
+ }
+ my $hostDest = $1 if ( $In{hostDest} =~ /(.+)/ );
+ my $ipAddr = ConfirmIPAddress($hostDest);
+ #
+ # Prepare and send the restore request. We write the request
+ # information using Data::Dumper to a unique file,
+ # $TopDir/pc/$hostDest/restoreReq.$$.n. We use a file
+ # in case the list of files to restore is very long.
+ #
+ my $reqFileName;
+ for ( my $i = 0 ; ; $i++ ) {
+ $reqFileName = "restoreReq.$$.$i";
+ last if ( !-f "$TopDir/pc/$hostDest/$reqFileName" );
+ }
+ my %restoreReq = (
+ # source of restore is hostSrc, #num, path shareSrc/pathHdrSrc
+ num => $In{num},
+ hostSrc => $host,
+ shareSrc => $share,
+ pathHdrSrc => $pathHdr,
+
+ # destination of restore is hostDest:shareDest/pathHdrDest
+ hostDest => $hostDest,
+ shareDest => $In{shareDest},
+ pathHdrDest => $In{pathHdr},
+
+ # list of files to restore
+ fileList => \@fileList,
+
+ # other info
+ user => $User,
+ reqTime => time,
+ );
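+ #
+ # Dump the request with the symbol name *RestoreReq so that a
+ # later "do" of this file repopulates %RestoreReq directly.
+ #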
+ my($dump) = Data::Dumper->new(
+ [ \%restoreReq],
+ [qw(*RestoreReq)]);
+ $dump->Indent(1);
+ if ( open(REQ, ">$TopDir/pc/$hostDest/$reqFileName") ) {
+ print(REQ $dump->Dump);
+ close(REQ);
+ } else {
+ ErrorExit("Can't open/create "
+ . ${EscapeHTML("$TopDir/pc/$hostDest/$reqFileName")});
+ }
+ $reply = $bpc->ServerMesg("restore $ipAddr"
+ . " $hostDest $User $reqFileName");
+ $str = "Restore requested to host $hostDest, backup #$num,"
+ . " by $User from $ENV{REMOTE_ADDR}";
+ Header("BackupPC: Restore Requested on $hostDest");
+ print <<EOF;
+${h1($str)}
+<p>
+Reply from server was: $reply
+<p>
+Go back to <a href="$MyURL?host=$hostDest">$hostDest home page</a>.
+EOF
+ Trailer();
+ }
+}
+
+sub Action_RestoreFile
+{
+ restoreFile($In{host}, $In{num}, $In{dir});
+}
+
+sub restoreFile
+{
+ my($host, $num, $dir, $skipHardLink, $origName) = @_;
+ my($Privileged) = CheckPermission($host);
+ my($i, $numF, $mangleF, $compressF, $mangle, $compress, $dirUM);
+ #
+ # Some common content (media) types from www.iana.org (via MIME::Types).
+ #
+ my $Ext2ContentType = {
+ 'asc' => 'text/plain',
+ 'avi' => 'video/x-msvideo',
+ 'bmp' => 'image/bmp',
+ 'book' => 'application/x-maker',
+ 'cc' => 'text/plain',
+ 'cpp' => 'text/plain',
+ 'csh' => 'application/x-csh',
+ 'csv' => 'text/comma-separated-values',
+ 'c' => 'text/plain',
+ 'deb' => 'application/x-debian-package',
+ 'doc' => 'application/msword',
+ 'dot' => 'application/msword',
+ 'dtd' => 'text/xml',
+ 'dvi' => 'application/x-dvi',
+ 'eps' => 'application/postscript',
+ 'fb' => 'application/x-maker',
+ 'fbdoc'=> 'application/x-maker',
+ 'fm' => 'application/x-maker',
+ 'frame'=> 'application/x-maker',
+ 'frm' => 'application/x-maker',
+ 'gif' => 'image/gif',
+ 'gtar' => 'application/x-gtar',
+ 'gz' => 'application/x-gzip',
+ 'hh' => 'text/plain',
+ 'hpp' => 'text/plain',
+ 'h' => 'text/plain',
+ 'html' => 'text/html',
+ 'htmlx'=> 'text/html',
+ 'htm' => 'text/html',
+ 'iges' => 'model/iges',
+ 'igs' => 'model/iges',
+ 'jpeg' => 'image/jpeg',
+ 'jpe' => 'image/jpeg',
+ 'jpg' => 'image/jpeg',
+ 'js' => 'application/x-javascript',
+ 'latex'=> 'application/x-latex',
+ 'maker'=> 'application/x-maker',
+ 'mid' => 'audio/midi',
+ 'midi' => 'audio/midi',
+ 'movie'=> 'video/x-sgi-movie',
+ 'mov' => 'video/quicktime',
+ 'mp2' => 'audio/mpeg',
+ 'mp3' => 'audio/mpeg',
+ 'mpeg' => 'video/mpeg',
+ 'mpg' => 'video/mpeg',
+ 'mpp' => 'application/vnd.ms-project',
+ 'pdf' => 'application/pdf',
+ 'pgp' => 'application/pgp-signature',
+ 'php' => 'application/x-httpd-php',
+ 'pht' => 'application/x-httpd-php',
+ 'phtml'=> 'application/x-httpd-php',
+ 'png' => 'image/png',
+ 'ppm' => 'image/x-portable-pixmap',
+ 'ppt' => 'application/vnd.ms-powerpoint',
+ 'ps' => 'application/postscript',
+ 'qt' => 'video/quicktime',
+ 'rgb' => 'image/x-rgb',
+ 'rtf' => 'text/rtf',
+ 'shar' => 'application/x-shar',
+ 'shtml'=> 'text/html',
+ 'swf' => 'application/x-shockwave-flash',
+ 'tex' => 'application/x-tex',
+ 'texi' => 'application/x-texinfo',
+ 'texinfo'=> 'application/x-texinfo',
+ 'tgz' => 'application/x-gtar',
+ 'tiff' => 'image/tiff',
+ 'tif' => 'image/tiff',
+ 'txt' => 'text/plain',
+ 'vcf' => 'text/x-vCard',
+ 'vrml' => 'model/vrml',
+ 'wav' => 'audio/x-wav',
+ 'wmls' => 'text/vnd.wap.wmlscript',
+ 'wml' => 'text/vnd.wap.wml',
+ 'wrl' => 'model/vrml',
+ 'xls' => 'application/vnd.ms-excel',
+ 'xml' => 'text/xml',
+ 'xwd' => 'image/x-xwindowdump',
+ 'z' => 'application/x-compress',
+ 'zip' => 'application/zip',
+ };
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can restore backup files"
+ . " for host ${EscapeHTML($host)}." );
+ }
+ ServerConnect();
+ my @Backups = $bpc->BackupInfoRead($host);
+ if ( $host eq "" ) {
+ ErrorExit("Empty host name");
+ }
+ $dir = "/" if ( $dir eq "" );
+ for ( $i = 0 ; $i < @Backups ; $i++ ) {
+ if ( !$Backups[$i]{noFill} ) {
+ $numF = $Backups[$i]{num};
+ $mangleF = $Backups[$i]{mangle};
+ $compressF = $Backups[$i]{compress};
+ }
+ last if ( $Backups[$i]{num} == $num );
+ }
+ $mangle = $Backups[$i]{mangle};
+ $compress = $Backups[$i]{compress};
+ if ( !$Backups[$i]{noFill} ) {
+ # no need to back-fill a filled backup
+ $numF = $mangleF = $compressF = undef;
+ }
+ my $fullPath = "$TopDir/pc/$host/$num/$dir";
+ $fullPath =~ s{/+}{/}g;
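+ #
+ # If the file isn't in this backup, fall back to the most recent
+ # filled backup, unmangling the name if the two dumps differ.
+ #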
+ if ( !-f $fullPath && defined($numF) ) {
+ my $dirF = $dir;
+ my $fullPathF;
+ if ( $mangle && !$mangleF ) {
+ $fullPathF = "$TopDir/pc/$host/$numF/"
+ . $bpc->fileNameUnmangle($dir);
+ } else {
+ $fullPathF = "$TopDir/pc/$host/$numF/$dir";
+ }
+ if ( -f $fullPathF ) {
+ $fullPath = $fullPathF;
+ $compress = $compressF;
+ }
+ }
+ if ( $fullPath =~ m{(^|/)\.\.(/|$)} || !-f $fullPath ) {
+ ErrorExit("Can't restore bad file ${EscapeHTML($fullPath)}");
+ }
+ $dirUM = $mangle ? $bpc->fileNameUnmangle($dir) : $dir;
+ my $attr = BackupPC::Attrib->new({compress => $compress});
+ my $fullDir = $fullPath;
+ $fullDir =~ s{(.*)/.*}{$1};
+ my $fileName = $1 if ( $dirUM =~ /.*\/(.*)/ );
+ $attr->read($fullDir) if ( -f $attr->fileName($fullDir) );
+ my $a = $attr->get($fileName);
+
+ my $f = BackupPC::FileZIO->open($fullPath, 0, $compress);
+ my $data;
+ if ( !$skipHardLink && $a->{type} == BPC_FTYPE_HARDLINK ) {
+ #
+ # hardlinks should look like the file they point to
+ #
+ my $linkName;
+ while ( $f->read(\$data, 65536) > 0 ) {
+ $linkName .= $data;
+ }
+ $f->close;
+ $linkName =~ s/^\.\///;
+ my $share = $1 if ( $dir =~ /^\/?(.*?)\// );
+ restoreFile($host, $num,
+ "$share/" . ($mangle ? $bpc->fileNameMangle($linkName)
+ : $linkName), 1, $dir);
+ return;
+ }
+ $dirUM =~ s{//}{/}g;
+ $fullPath =~ s{//}{/}g;
+ $bpc->ServerMesg("log User $User recovered file $dirUM ($fullPath)");
+ $dir = $origName if ( defined($origName) );
+ $dirUM = $mangle ? $bpc->fileNameUnmangle($dir) : $dir;
+ my $ext = $1 if ( $dirUM =~ /\.([^\/\.]+)$/ );
+ my $contentType = $Ext2ContentType->{lc($ext)}
+ || "application/octet-stream";
+ $fileName = $1 if ( $dirUM =~ /.*\/(.*)/ );
+ $fileName =~ s/"/\\"/g;
+ print "Content-Type: $contentType\n";
+ print "Content-Transfer-Encoding: binary\n";
+ print "Content-Disposition: attachment; filename=\"$fileName\"\n\n";
+ while ( $f->read(\$data, 1024 * 1024) > 0 ) {
+ print STDOUT $data;
+ }
+ $f->close;
+}
+
+sub Action_HostInfo
+{
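+ # Copy the host name through a regexp match (the usual untaint idiom).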
+ my $host = $1 if ( $In{host} =~ /(.*)/ );
+ my($statusStr, $startIncrStr);
+
+ $host =~ s/^\s+//;
+ $host =~ s/\s+$//;
+ return Action_GeneralInfo() if ( $host eq "" );
+ $host = lc($host)
+ if ( !-d "$TopDir/pc/$host" && -d "$TopDir/pc/" . lc($host) );
+ if ( $host =~ /\.\./ || !-d "$TopDir/pc/$host" ) {
+ #
+ # try to lookup by user name
+ #
+ if ( !defined($Hosts->{$host}) ) {
+ foreach my $h ( keys(%$Hosts) ) {
+ if ( $Hosts->{$h}{user} eq $host
+ || lc($Hosts->{$h}{user}) eq lc($host) ) {
+ $host = $h;
+ last;
+ }
+ }
+ CheckPermission();
+ ErrorExit("Unknown host or user ${EscapeHTML($host)}")
+ if ( !defined($Hosts->{$host}) );
+ }
+ $In{host} = $host;
+ }
+ GetStatusInfo("host($host)");
+ $bpc->ConfigRead($host);
+ %Conf = $bpc->Conf();
+ my $Privileged = CheckPermission($host);
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view information about"
+ . " host ${EscapeHTML($host)}." );
+ }
+ ReadUserEmailInfo();
+
+ my @Backups = $bpc->BackupInfoRead($host);
+ my($str, $sizeStr, $compStr, $errStr, $warnStr);
+ for ( my $i = 0 ; $i < @Backups ; $i++ ) {
+ my $startTime = timeStamp2($Backups[$i]{startTime});
+ my $dur = $Backups[$i]{endTime} - $Backups[$i]{startTime};
+ $dur = 1 if ( $dur <= 0 );
+ my $duration = sprintf("%.1f", $dur / 60);
+ my $MB = sprintf("%.1f", $Backups[$i]{size} / (1024*1024));
+ my $MBperSec = sprintf("%.2f", $Backups[$i]{size} / (1024*1024*$dur));
+ my $MBExist = sprintf("%.1f", $Backups[$i]{sizeExist} / (1024*1024));
+ my $MBNew = sprintf("%.1f", $Backups[$i]{sizeNew} / (1024*1024));
+ my($MBExistComp, $ExistComp, $MBNewComp, $NewComp);
+ if ( $Backups[$i]{sizeExist} && $Backups[$i]{sizeExistComp} ) {
+ $MBExistComp = sprintf("%.1f", $Backups[$i]{sizeExistComp}
+ / (1024 * 1024));
+ $ExistComp = sprintf("%.1f%%", 100 *
+ (1 - $Backups[$i]{sizeExistComp} / $Backups[$i]{sizeExist}));
+ }
+ if ( $Backups[$i]{sizeNew} && $Backups[$i]{sizeNewComp} ) {
+ $MBNewComp = sprintf("%.1f", $Backups[$i]{sizeNewComp}
+ / (1024 * 1024));
+ $NewComp = sprintf("%.1f%%", 100 *
+ (1 - $Backups[$i]{sizeNewComp} / $Backups[$i]{sizeNew}));
+ }
+ my $age = sprintf("%.1f", (time - $Backups[$i]{startTime}) / (24*3600));
+ my $browseURL = "$MyURL?action=browse&host=$host&num=$Backups[$i]{num}";
+ my $filled = $Backups[$i]{noFill} ? "no" : "yes";
+ $filled .= " ($Backups[$i]{fillFromNum}) "
+ if ( $Backups[$i]{fillFromNum} ne "" );
+ $str .= <<EOF;
+<tr><td align="center"> <a href="$browseURL">$Backups[$i]{num}</a> </td>
+ <td align="center"> $Backups[$i]{type} </td>
+ <td align="center"> $filled </td>
+ <td align="right"> $startTime </td>
+ <td align="right"> $duration </td>
+ <td align="right"> $age </td>
+ <td align="left"> <tt>$TopDir/pc/$host/$Backups[$i]{num}</tt> </td></tr>
+EOF
+ $sizeStr .= <<EOF;
+<tr><td align="center"> <a href="$browseURL">$Backups[$i]{num}</a> </td>
+ <td align="center"> $Backups[$i]{type} </td>
+ <td align="right"> $Backups[$i]{nFiles} </td>
+ <td align="right"> $MB </td>
+ <td align="right"> $MBperSec </td>
+ <td align="right"> $Backups[$i]{nFilesExist} </td>
+ <td align="right"> $MBExist </td>
+ <td align="right"> $Backups[$i]{nFilesNew} </td>
+ <td align="right"> $MBNew </td>
+</tr>
+EOF
+ $Backups[$i]{compress} ||= "off";
+ $compStr .= <<EOF;
+<tr><td align="center"> <a href="$browseURL">$Backups[$i]{num}</a> </td>
+ <td align="center"> $Backups[$i]{type} </td>
+ <td align="center"> $Backups[$i]{compress} </td>
+ <td align="right"> $MBExist </td>
+ <td align="right"> $MBExistComp </td>
+ <td align="right"> $ExistComp </td>
+ <td align="right"> $MBNew </td>
+ <td align="right"> $MBNewComp </td>
+ <td align="right"> $NewComp </td>
+</tr>
+EOF
+ $errStr .= <<EOF;
+<tr><td align="center"> <a href="$browseURL">$Backups[$i]{num}</a> </td>
+ <td align="center"> $Backups[$i]{type} </td>
+ <td align="center"> <a href="$MyURL?action=view&type=XferLOG&num=$Backups[$i]{num}&host=$host">XferLOG</a>,
+ <a href="$MyURL?action=view&type=XferErr&num=$Backups[$i]{num}&host=$host">Errors</a> </td>
+ <td align="right"> $Backups[$i]{xferErrs} </td>
+ <td align="right"> $Backups[$i]{xferBadFile} </td>
+ <td align="right"> $Backups[$i]{xferBadShare} </td>
+ <td align="right"> $Backups[$i]{tarErrs} </td></tr>
+EOF
+ }
+
+ my @Restores = $bpc->RestoreInfoRead($host);
+ my $restoreStr;
+
+ for ( my $i = 0 ; $i < @Restores ; $i++ ) {
+ my $startTime = timeStamp2($Restores[$i]{startTime});
+ my $dur = $Restores[$i]{endTime} - $Restores[$i]{startTime};
+ $dur = 1 if ( $dur <= 0 );
+ my $duration = sprintf("%.1f", $dur / 60);
+ my $MB = sprintf("%.1f", $Restores[$i]{size} / (1024*1024));
+ my $MBperSec = sprintf("%.2f", $Restores[$i]{size} / (1024*1024*$dur));
+ $restoreStr .= <<EOF;
+<tr><td align="center"><a href="$MyURL?action=restoreInfo&num=$Restores[$i]{num}&host=$host">$Restores[$i]{num}</a> </td>
+ <td align="center"> $Restores[$i]{result} </td>
+ <td align="right"> $startTime </td>
+ <td align="right"> $duration </td>
+ <td align="right"> $Restores[$i]{nFiles} </td>
+ <td align="right"> $MB </td>
+ <td align="right"> $Restores[$i]{tarCreateErrs} </td>
+ <td align="right"> $Restores[$i]{xferErrs} </td>
+</tr>
+EOF
+ }
+ $restoreStr = <<EOF if ( $restoreStr ne "" );
+${h2("Restore Summary")}
+<p>
+Click on the restore number for more details.
+<table border>
+<tr><td align="center"> Restore# </td>
+ <td align="center"> Result </td>
+ <td align="right"> Start Date</td>
+ <td align="right"> Dur/mins</td>
+ <td align="right"> #files </td>
+ <td align="right"> MB </td>
+ <td align="right"> #tar errs </td>
+ <td align="right"> #xferErrs </td>
+</tr>
+$restoreStr
+</table>
+<p>
+EOF
+
+ if ( @Backups == 0 ) {
+ $warnStr = "<h2> This PC has never been backed up!! </h2>\n";
+ }
+ if ( defined($Hosts->{$host}) ) {
+ my $user = $Hosts->{$host}{user};
+ if ( $user ne "" ) {
+ $statusStr .= <<EOF;
+<li>This PC is used by ${UserLink($user)}.
+EOF
+ }
+ if ( defined($UserEmailInfo{$user})
+ && $UserEmailInfo{$user}{lastHost} eq $host ) {
+ my $mailTime = timeStamp2($UserEmailInfo{$user}{lastTime});
+ my $subj = $UserEmailInfo{$user}{lastSubj};
+ $statusStr .= <<EOF;
+<li>Last email sent to ${UserLink($user)} was at $mailTime, subject "$subj".
+EOF
+ }
+ }
+ if ( defined($Jobs{$host}) ) {
+ my $startTime = timeStamp2($Jobs{$host}{startTime});
+ (my $cmd = $Jobs{$host}{cmd}) =~ s/$BinDir\///g;
+ $statusStr .= <<EOF;
+<li>The command $cmd is currently running for $host, started $startTime.
+EOF
+ }
+ if ( $StatusHost{BgQueueOn} ) {
+ $statusStr .= <<EOF;
+<li>Host $host is queued on the background queue (will be backed up soon).
+EOF
+ }
+ if ( $StatusHost{UserQueueOn} ) {
+ $statusStr .= <<EOF;
+<li>Host $host is queued on the user queue (will be backed up soon).
+EOF
+ }
+ if ( $StatusHost{CmdQueueOn} ) {
+ $statusStr .= <<EOF;
+<li>A command for $host is on the command queue (will run soon).
+EOF
+ }
+ my $startTime = timeStamp2($StatusHost{endTime} == 0 ?
+ $StatusHost{startTime} : $StatusHost{endTime});
+ my $reason = "";
+ if ( $StatusHost{reason} ne "" ) {
+ $reason = " ($StatusHost{reason})";
+ }
+ $statusStr .= <<EOF;
+<li>Last status is state "$StatusHost{state}"$reason
+ as of $startTime.
+EOF
+ if ( $StatusHost{error} ne "" ) {
+ $statusStr .= <<EOF;
+<li>Last error is "${EscapeHTML($StatusHost{error})}"
+EOF
+ }
+ my $priorStr = "Pings";
+ if ( $StatusHost{deadCnt} > 0 ) {
+ $statusStr .= <<EOF;
+<li>Pings to $host have failed $StatusHost{deadCnt} consecutive times.
+EOF
+ $priorStr = "Prior to that, pings";
+ }
+ if ( $StatusHost{aliveCnt} > 0 ) {
+ $statusStr .= <<EOF;
+<li>$priorStr to $host have succeeded $StatusHost{aliveCnt}
+ consecutive times.
+EOF
+ if ( $StatusHost{aliveCnt} >= $Conf{BlackoutGoodCnt}
+ && $Conf{BlackoutGoodCnt} >= 0 && $Conf{BlackoutHourBegin} >= 0
+ && $Conf{BlackoutHourEnd} >= 0 ) {
+ my(@days) = qw(Sun Mon Tue Wed Thu Fri Sat);
+ my($days) = join(", ", @days[@{$Conf{BlackoutWeekDays}}]);
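+ # Convert the fractional blackout hours (e.g. 9.5) to H:MM format.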
+ my($t0) = sprintf("%d:%02d", $Conf{BlackoutHourBegin},
+ 60 * ($Conf{BlackoutHourBegin}
+ - int($Conf{BlackoutHourBegin})));
+ my($t1) = sprintf("%d:%02d", $Conf{BlackoutHourEnd},
+ 60 * ($Conf{BlackoutHourEnd}
+ - int($Conf{BlackoutHourEnd})));
+ $statusStr .= <<EOF;
+<li>Because $host has been on the network at least $Conf{BlackoutGoodCnt}
+consecutive times, it will not be backed up from $t0 to $t1 on $days.
+EOF
+ }
+ }
+ if ( $StatusHost{backoffTime} > time ) {
+ my $hours = sprintf("%.1f", ($StatusHost{backoffTime} - time) / 3600);
+ $statusStr .= <<EOF;
+<li>Backups are deferred for $hours hours
+ (<a href="$MyURL?action=Stop/Dequeue%20Backup&host=$host">change this
+ number</a>).
+EOF
+ }
+ if ( @Backups ) {
+ # only allow incremental if there are already some backups
+ $startIncrStr = <<EOF;
+<input type="submit" value="Start Incr Backup" name="action">
+EOF
+ }
+
+ Header("BackupPC: Host $host Backup Summary");
+ print <<EOF;
+${h1("Host $host Backup Summary")}
+<p>
+$warnStr
+<ul>
+$statusStr
+</ul>
+
+${h2("User Actions")}
+<p>
+<form action="$MyURL" method="get">
+<input type="hidden" name="host" value="$host">
+$startIncrStr
+<input type="submit" value="Start Full Backup" name="action">
+<input type="submit" value="Stop/Dequeue Backup" name="action">
+</form>
+
+${h2("Backup Summary")}
+<p>
+Click on the backup number to browse and restore backup files.
+<table border>
+<tr><td align="center"> Backup# </td>
+ <td align="center"> Type </td>
+ <td align="center"> Filled </td>
+ <td align="center"> Start Date </td>
+ <td align="center"> Duration/mins </td>
+ <td align="center"> Age/days </td>
+ <td align="center"> Server Backup Path </td>
+</tr>
+$str
+</table>
+<p>
+
+$restoreStr
+
+${h2("Xfer Error Summary")}
+<p>
+<table border>
+<tr><td align="center"> Backup# </td>
+ <td align="center"> Type </td>
+ <td align="center"> View </td>
+ <td align="center"> #Xfer errs </td>
+ <td align="center"> #bad files </td>
+ <td align="center"> #bad share </td>
+ <td align="center"> #tar errs </td>
+</tr>
+$errStr
+</table>
+<p>
+
+${h2("File Size/Count Reuse Summary")}
+<p>
+Existing files are those already in the pool; new files are those added
+to the pool.
+Empty files and SMB errors aren't counted in the reuse and new counts.
+<table border>
+<tr><td colspan="2"></td>
+ <td align="center" colspan="3"> Totals </td>
+ <td align="center" colspan="2"> Existing Files </td>
+ <td align="center" colspan="2"> New Files </td>
+</tr>
+<tr>
+ <td align="center"> Backup# </td>
+ <td align="center"> Type </td>
+ <td align="center"> #Files </td>
+ <td align="center"> Size/MB </td>
+ <td align="center"> MB/sec </td>
+ <td align="center"> #Files </td>
+ <td align="center"> Size/MB </td>
+ <td align="center"> #Files </td>
+ <td align="center"> Size/MB </td>
+</tr>
+$sizeStr
+</table>
+<p>
+
+${h2("Compression Summary")}
+<p>
+Compression performance for files already in the pool and newly
+compressed files.
+<table border>
+<tr><td colspan="3"></td>
+ <td align="center" colspan="3"> Existing Files </td>
+ <td align="center" colspan="3"> New Files </td>
+</tr>
+<tr><td align="center"> Backup# </td>
+ <td align="center"> Type </td>
+ <td align="center"> Comp Level </td>
+ <td align="center"> Size/MB </td>
+ <td align="center"> Comp/MB </td>
+ <td align="center"> Comp </td>
+ <td align="center"> Size/MB </td>
+ <td align="center"> Comp/MB </td>
+ <td align="center"> Comp </td>
+</tr>
+$compStr
+</table>
+<p>
+EOF
+ Trailer();
+}
+
+sub Action_GeneralInfo
+{
+ GetStatusInfo("info jobs hosts queueLen");
+ my $Privileged = CheckPermission();
+
+ my($jobStr, $statusStr, $tarPidHdr, $privLinks);
+ foreach my $host ( sort(keys(%Jobs)) ) {
+ my $startTime = timeStamp2($Jobs{$host}{startTime});
+ next if ( $host eq $bpc->trashJob
+ && $Jobs{$host}{processState} ne "running" );
+ $Jobs{$host}{type} = $Status{$host}{type}
+ if ( $Jobs{$host}{type} eq "" && defined($Status{$host}));
+ (my $cmd = $Jobs{$host}{cmd}) =~ s/$BinDir\///g;
+ $jobStr .= <<EOF;
+<tr><td> ${HostLink($host)} </td>
+ <td align="center"> $Jobs{$host}{type} </td>
+ <td align="center"> ${UserLink($Hosts->{$host}{user})} </td>
+ <td> $startTime </td>
+ <td> $cmd </td>
+ <td align="center"> $Jobs{$host}{pid} </td>
+ <td align="center"> $Jobs{$host}{xferPid} </td>
+EOF
+ if ( $Jobs{$host}{tarPid} > 0 ) {
+ $jobStr .= " <td align=\"center\"> $Jobs{$host}{tarPid} </td>\n";
+ $tarPidHdr ||= "<td align=\"center\"> tar PID </td>\n";
+ }
+ $jobStr .= "</tr>\n";
+ }
+ foreach my $host ( sort(keys(%Status)) ) {
+ next if ( $Status{$host}{reason} ne "backup failed" );
+ my $startTime = timeStamp2($Status{$host}{startTime});
+ my($errorTime, $XferViewStr);
+ if ( $Status{$host}{errorTime} > 0 ) {
+ $errorTime = timeStamp2($Status{$host}{errorTime});
+ }
+ if ( -f "$TopDir/pc/$host/SmbLOG.bad"
+ || -f "$TopDir/pc/$host/SmbLOG.bad.z"
+ || -f "$TopDir/pc/$host/XferLOG.bad"
+ || -f "$TopDir/pc/$host/XferLOG.bad.z"
+ ) {
+ $XferViewStr = <<EOF;
+<a href="$MyURL?action=view&type=XferLOGbad&host=$host">XferLOG</a>,
+<a href="$MyURL?action=view&type=XferErrbad&host=$host">XferErr</a>
+EOF
+ } else {
+ $XferViewStr = "";
+ }
+ (my $shortErr = $Status{$host}{error}) =~ s/(.{48}).*/$1.../;
+ $statusStr .= <<EOF;
+<tr><td> ${HostLink($host)} </td>
+ <td align="center"> $Status{$host}{type} </td>
+ <td align="center"> ${UserLink($Hosts->{$host}{user})} </td>
+ <td align="right"> $startTime </td>
+ <td> $XferViewStr </td>
+ <td align="right"> $errorTime </td>
+ <td> ${EscapeHTML($shortErr)} </td></tr>
+EOF
+ }
+ my $now = timeStamp2(time);
+ my $nextWakeupTime = timeStamp2($Info{nextWakeup});
+ my $DUlastTime = timeStamp2($Info{DUlastValueTime});
+ my $DUmaxTime = timeStamp2($Info{DUDailyMaxTime});
+ my $numBgQueue = $QueueLen{BgQueue};
+ my $numUserQueue = $QueueLen{UserQueue};
+ my $numCmdQueue = $QueueLen{CmdQueue};
+ my $serverStartTime = timeStamp2($Info{startTime});
+ my $poolInfo = genPoolInfo("pool", \%Info);
+ my $cpoolInfo = genPoolInfo("cpool", \%Info);
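+ # If both pools are in use show both; if only the compressed pool
+ # is in use, show just that one.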
+ if ( $Info{poolFileCnt} > 0 && $Info{cpoolFileCnt} > 0 ) {
+ $poolInfo = <<EOF;
+<li>Uncompressed pool:
+<ul>
+$poolInfo
+</ul>
+<li>Compressed pool:
+<ul>
+$cpoolInfo
+</ul>
+EOF
+ } elsif ( $Info{cpoolFileCnt} > 0 ) {
+ $poolInfo = $cpoolInfo;
+ }
+ Header("BackupPC: Server Status");
+ print <<EOF;
+
+${h1("BackupPC Server Status")}
+<p>
+
+${h2("General Server Information")}
+
+<ul>
+<li> The server's PID is $Info{pid} on host $Conf{ServerHost},
+ version $Info{Version}, started at $serverStartTime.
+<li> This status was generated at $now.
+<li> PCs will be next queued at $nextWakeupTime.
+<li> Other info:
+ <ul>
+ <li>$numBgQueue pending backup requests from last scheduled wakeup,
+ <li>$numUserQueue pending user backup requests,
+ <li>$numCmdQueue pending command requests,
+ $poolInfo
+ <li>Pool file system was recently at $Info{DUlastValue}%
+ ($DUlastTime), today's max is $Info{DUDailyMax}% ($DUmaxTime)
+ and yesterday's max was $Info{DUDailyMaxPrev}%.
+ </ul>
+</ul>
+
+${h2("Currently Running Jobs")}
+<p>
+<table border>
+<tr><td> Host </td>
+ <td> Type </td>
+ <td> User </td>
+ <td> Start Time </td>
+ <td> Command </td>
+ <td align="center"> PID </td>
+ <td> Xfer PID </td>
+ $tarPidHdr</tr>
+$jobStr
+</table>
+<p>
+
+${h2("Failures that need attention")}
+<p>
+<table border>
+<tr><td align="center"> Host </td>
+ <td align="center"> Type </td>
+ <td align="center"> User </td>
+ <td align="center"> Last Try </td>
+ <td align="center"> Details </td>
+ <td align="center"> Error Time </td>
+ <td> Last error (other than no ping) </td></tr>
+$statusStr
+</table>
+EOF
+ Trailer();
+}
+
+sub Action_RestoreInfo
+{
+ my $Privileged = CheckPermission($In{host});
+ my $host = $1 if ( $In{host} =~ /(.*)/ );
+ my $num = $In{num};
+ my $i;
+
+ if ( !$Privileged ) {
+ ErrorExit("Only privileged users can view restore information." );
+ }
+ #
+ # Find the requested restore
+ #
+ my @Restores = $bpc->RestoreInfoRead($host);
+ for ( $i = 0 ; $i < @Restores ; $i++ ) {
+ last if ( $Restores[$i]{num} == $num );
+ }
+ if ( $i >= @Restores ) {
+ ErrorExit("Restore number $num for host ${EscapeHTML($host)} does"
+ . " not exist.");
+ }
+
+ %RestoreReq = ();
+ do "$TopDir/pc/$host/RestoreInfo.$Restores[$i]{num}"
+ if ( -f "$TopDir/pc/$host/RestoreInfo.$Restores[$i]{num}" );
+
+ my $startTime = timeStamp2($Restores[$i]{startTime});
+ my $reqTime = timeStamp2($RestoreReq{reqTime});
+ my $dur = $Restores[$i]{endTime} - $Restores[$i]{startTime};
+ $dur = 1 if ( $dur <= 0 );
+ my $duration = sprintf("%.1f", $dur / 60);
+ my $MB = sprintf("%.1f", $Restores[$i]{size} / (1024*1024));
+ my $MBperSec = sprintf("%.2f", $Restores[$i]{size} / (1024*1024*$dur));
+
+ my $fileListStr = "";
+ foreach my $f ( @{$RestoreReq{fileList}} ) {
+ my $targetFile = $f;
+ (my $strippedShareSrc = $RestoreReq{shareSrc}) =~ s/^\///;
+ (my $strippedShareDest = $RestoreReq{shareDest}) =~ s/^\///;
+ substr($targetFile, 0, length($RestoreReq{pathHdrSrc}))
+ = $RestoreReq{pathHdrDest};
+ $fileListStr .= <<EOF;
+<tr><td>$RestoreReq{hostSrc}:/$strippedShareSrc$f</td><td>$RestoreReq{hostDest}:/$strippedShareDest$targetFile</td></tr>
+EOF
+ }
+
+ Header("BackupPC: Restore #$num details for $host");
+ print <<EOF;
+${h1("Restore #$num Details for $host")}
+<p>
+<table border>
+<tr><td> Number </td><td> $Restores[$i]{num} </td></tr>
+<tr><td> Requested by </td><td> $RestoreReq{user} </td></tr>
+<tr><td> Request time </td><td> $reqTime </td></tr>
+<tr><td> Result </td><td> $Restores[$i]{result} </td></tr>
+<tr><td> Error Message </td><td> $Restores[$i]{errorMsg} </td></tr>
+<tr><td> Source host </td><td> $RestoreReq{hostSrc} </td></tr>
+<tr><td> Source backup num </td><td> $RestoreReq{num} </td></tr>
+<tr><td> Source share </td><td> $RestoreReq{shareSrc} </td></tr>
+<tr><td> Destination host </td><td> $RestoreReq{hostDest} </td></tr>
+<tr><td> Destination share </td><td> $RestoreReq{shareDest} </td></tr>
+<tr><td> Start time </td><td> $startTime </td></tr>
+<tr><td> Duration </td><td> $duration min </td></tr>
+<tr><td> Number of files </td><td> $Restores[$i]{nFiles} </td></tr>
+<tr><td> Total size </td><td> ${MB} MB </td></tr>
+<tr><td> Transfer rate </td><td> $MBperSec MB/sec </td></tr>
+<tr><td> TarCreate errors </td><td> $Restores[$i]{tarCreateErrs} </td></tr>
+<tr><td> Xfer errors </td><td> $Restores[$i]{xferErrs} </td></tr>
+<tr><td> Xfer log file </td><td>
+<a href="$MyURL?action=view&type=RestoreLOG&num=$Restores[$i]{num}&host=$host">View</a>,
+<a href="$MyURL?action=view&type=RestoreErr&num=$Restores[$i]{num}&host=$host">Errors</a>
+</td></tr>
+</table>
+<p>
+${h1("File/Directory list")}
+<p>
+<table border>
+<tr><td>Original file/dir</td><td>Restored to</td></tr>
+$fileListStr
+</table>
+EOF
+ Trailer();
+}
+
+###########################################################################
+# Miscellaneous subroutines
+###########################################################################
+
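+#
+# Format a time() value as "mon/day hour:min" (or "day/mon hour:min",
+# depending on $Conf{CgiDateFormatMMDD}); an argument of 0 means now.
+#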
+sub timeStamp2
+{
+ my($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)
+ = localtime($_[0] == 0 ? time : $_[0] );
+ $year += 1900;
+ $mon++;
+ if ( $Conf{CgiDateFormatMMDD} ) {
+ return sprintf("$mon/$mday %02d:%02d", $hour, $min);
+ } else {
+ return sprintf("$mday/$mon %02d:%02d", $hour, $min);
+ }
+}
+
+sub HostLink
+{
+ my($host) = @_;
+ my($s);
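+ # A reference is returned so the result can be interpolated as
+ # ${HostLink($host)} inside heredocs; the same applies to
+ # UserLink and EscapeHTML below.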
+ if ( defined($Hosts->{$host}) || defined($Status{$host}) ) {
+ $s = "<a href=\"$MyURL?host=$host\">$host</a>";
+ } else {
+ $s = $host;
+ }
+ return \$s;
+}
+
+sub UserLink
+{
+ my($user) = @_;
+ my($s);
+
+ return \$user if ( $user eq ""
+ || $Conf{CgiUserUrlCreate} eq "" );
+ if ( $Conf{CgiUserHomePageCheck} eq ""
+ || -f sprintf($Conf{CgiUserHomePageCheck}, $user, $user, $user) ) {
+ $s = "<a href=\""
+ . sprintf($Conf{CgiUserUrlCreate}, $user, $user, $user)
+ . "\">$user</a>";
+ } else {
+ $s = $user;
+ }
+ return \$s;
+}
+
+sub EscapeHTML
+{
+ my($s) = @_;
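+ # Escape HTML metacharacters and encode any non-printable bytes
+ # as numeric entities.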
+ $s =~ s/&/&amp;/g;
+ $s =~ s/\"/&quot;/g;
+ $s =~ s/>/&gt;/g;
+ $s =~ s/</&lt;/g;
+ $s =~ s{([^[:print:]])}{sprintf("&#x%02X;", ord($1));}eg;
+ return \$s;
+}
+
+##sub URIEncode
+##{
+## my($s) = @_;
+## $s =~ s{(['"&%[:^print:]])}{sprintf("%%%02X", ord($1));}eg;
+## return \$s;
+##}
+
+sub ErrorExit
+{
+ my(@mesg) = @_;
+ my($head) = shift(@mesg);
+ my($mesg) = join("</p>\n<p>", @mesg);
+ $Conf{CgiHeaderFontType} ||= "arial";
+ $Conf{CgiHeaderFontSize} ||= "3";
+ $Conf{CgiNavBarBgColor} ||= "#ddeeee";
+ $Conf{CgiHeaderBgColor} ||= "#99cc33";
+
+ $bpc->ServerMesg("log User $User (host=$In{host}) got CGI error: $head")
+ if ( defined($bpc) );
+ Header("BackupPC: Error");
+ print <<EOF;
+${h1("Error: $head")}
+<p>$mesg</p>
+EOF
+ Trailer();
+ exit(1);
+}
+
+sub ServerConnect
+{
+ #
+ # Verify that the server connection is ok
+ #
+ return if ( $bpc->ServerOK() );
+ $bpc->ServerDisconnect();
+ if ( my $err = $bpc->ServerConnect($Conf{ServerHost}, $Conf{ServerPort}) ) {
+ ErrorExit(
+ "Unable to connect to BackupPC server",
+ "This CGI script ($MyURL) is unable to connect to the BackupPC"
+ . " server on $Conf{ServerHost} port $Conf{ServerPort}. The error"
+ . " was: $err.",
+ "Perhaps the BackupPC server is not running or there is a "
+ . " configuration err