-rw-r--r--   AUTHORS                      8
-rw-r--r--   ChangeLog                   92
-rw-r--r--   INSTALL                      2
-rw-r--r--   Makefile.am                  2
-rw-r--r--   configure.in                 2
-rw-r--r--   examples/example.dup        21
-rw-r--r--   examples/example.rdiff      14
-rw-r--r--   examples/example.rsync      29
-rw-r--r--   examples/example.sys        15
-rw-r--r--   handlers/Makefile.am         8
-rw-r--r--   handlers/dsync.in          345
-rw-r--r--   handlers/dup.helper.in      35
-rw-r--r--   handlers/dup.in             38
-rw-r--r--   handlers/maildir.in          3
-rw-r--r--   handlers/mysql.in           17
-rw-r--r--   handlers/rdiff.helper.in    25
-rw-r--r--   handlers/rdiff.in           13
-rw-r--r--   handlers/rsync.in          155
-rw-r--r--   handlers/sys.helper.in       7
-rw-r--r--   handlers/sys.in            123
-rw-r--r--   handlers/tar.helper.in       3
-rw-r--r--   handlers/tar.in              6
-rw-r--r--   lib/tools.in                38
-rwxr-xr-x   src/backupninja.in          56
24 files changed, 888 insertions, 169 deletions
diff --git a/AUTHORS b/AUTHORS
index 1319688..06c70c6 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -43,3 +43,11 @@ aihtdikh -- Allow 'when = XXX' with spaces in .sh files.
Chris Lamb <lamby@debian.org> -- rdiff.helper bugfix
Yuval Kogman <nothingmuch@woobling.org> -- RackSpace's CloudFiles support for duplicity
exobuzz - mysql bugfixes
+Glennie Vignarajah <glennie@glennie.fr> -- mysql bugfix
+ddpaul <paul@reic.ru> -- rsync bugfix
+ulrich -- duplicity bugfix preliminary patch
+David Gasaway <dave@gasaway.org> -- rdiff's output_as_info option
+Pierre ROUDIER <contact@pierreroudier.net> -- xz and test mode for tar handler
+olb <olb@nebkha.net> -- update of duplicity/paramiko SSH options handling
+Alexander Mette <mail@amette.eu> -- duplicity bugfix
+Dominik George <nik@naturalnet.de> -- Support using a different passphrase for the signing key from the one used for the encryption key in the dup handler
diff --git a/ChangeLog b/ChangeLog
index 1e20e00..2224984 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,72 @@
-version 1.0 -- UNRELEASED
+version 1.0.2 -- UNRELEASED
+ handler changes
+ dsync:
+ . Miscellaneous improvements to this experimental handler.
+ dup:
+ . For local backups, check that the destination directory exists.
+ Thanks to ulrich for the preliminary patch. (Redmine#4049)
+ . Gracefully handle legacy spaces between -o and IdentityFile.
+ . Don't remove useful signature files with --extra-clean,
+ on duplicity versions that have a sane handling of the cache.
+ Thanks to Alexander Mette <mail@amette.eu> for the patch!
+ (Closes: Redmine#6357)
+ . Fix buggy version comparison. (Redmine#6746)
+ . Support using a different passphrase for the signing key
+ from the one used for the encryption key.
+ Thanks to Dominik George <nik@naturalnet.de> for the patch!
+    . Run duplicity in a C locale environment (Closes: #736280).
+ Thanks to Jonathan Dupart <jonathan@dupart.org> for the report,
+ and the initial patch!
+ mysql:
+ . Make "nodata" option compatible with compress=no.
+ . Fix non-qualified table name extraction. (Closes: Redmine#4373)
+ rdiff:
+ . Add option to include rdiff-backup output in reports.
+ Thanks to David Gasaway <dave@gasaway.org> for the patch!
+ rsync:
+ . Solve quoting issue with "su". (Closes: #683731, Redmine#4019)
+ sys:
+ . Fix LUKS header backup to properly detect partitions.
+ . Provide the ability to backup the MBR for every device found,
+ and to backup the BIOS (if the flashrom program is installed,
+ and the mainboard is supported).
+ tar:
+ . Support test mode and xz compression.
+ Thanks to Pierre ROUDIER <contact@pierreroudier.net> for the patches.
+ helper changes
+ rdiff:
+ . Don't give misleading information regarding required fields.
+ (Redmine#4410)
+ . Support output_as_info.
+ Thanks to David Gasaway <dave@gasaway.org> for the patch!
+ backupninja changes
+ . Indentation fixes, thanks to exobuzz. (Redmine#6726)
+ documentation changes
+ . Update INSTALL file to add some missing recommended programs.
+
+version 1.0.1 -- June 29, 2012
+ handler changes
+ rsync:
+ . Issue warnings, not fatal errors, on non-fatal rsync errors.
+ (Redmine#3966)
+
+version 1.0 -- June 15, 2012
+ The "happy birthdays" release!
+ handler changes
+ mysql:
+ . Use --skip-events when backing up the performance_schema database.
+ (Closes: #673572)
+ rsync:
+ . Generate excludes command-line snippet the same way as
+ the duplicity handler does.
+ . Run rsync command-line through a shell, so that single-quotes
+ around excludes are interpreted (Closes: #677410)
+ sys:
+ . Don't execute /usr/bin/lspci or /sbin/modinfo when $hardware == "no"
+ backupninja changes
+ . Make it clear what lockfile could not be acquired, if any.
+
+version 1.0-rc1 -- May 15, 2012
handler changes
dup:
. Make the .dup generated by ninjahelper more consistent with
@@ -10,6 +78,8 @@ version 1.0 -- UNRELEASED
(Closes: #657201)
. Report failure output at error loglevel so that it is emailed
(Closes: #536858)
+ maildir:
+ . Remove 'loadlimit' parameter - it is not used anywhere.
mysql:
. Don't attempt to dump performance_schema database (Redmine#3741).
pgsql:
@@ -22,15 +92,29 @@ version 1.0 -- UNRELEASED
. Fix metadata rotation.
. Allow disabling rotation or setting 2 days as minimum for backup
increments in rsync short format (Redmine#2107).
- . Use fatal instead of custom "exit 1" (Redmine#3721).
. Abort on rsync error (Redmine#3692).
. Cleanup orphaned metadata (Redmine#3727).
- . Use the backup start time and not the time the backup was finished
- (Closes: #654192)
+ . Use the backup start time and not the time the backup was finished.
+ (Closes: #654192).
+ . Use 'debug', 'fatal' and 'warning' functions instead of regular echo
+ and exit (Redmine#3840, Redmine#3721).
+ . Quoting $starttime (Redmine#3868).
+ . Validate created date on long_rotation to avoid too many arguments
+ at comparison (Redmine#3868).
+ . Quoting $exclude and $excludes and avoiding a for loop on $exclude
+      so that wildcards are not expanded beforehand (Redmine#3882).
+ . Quote excludes (Redmine#3882).
+ . Changing remaining 'exit' to 'fatal' at rsync handler (Redmine#3721).
+ . Removing duplicated locking support (Redmine#3838).
+ . Documenting rotation parameters at example.rsync (Redmine#3891).
+ . Ensure that a non-zero rsync exit status is caught (Redmine#3892).
build system changes
. Workaround automake sanity check that would prevent us from
installing lib/* into lib/backupninja/. Where else are we supposed
to install such files anyway?
+ . Have "make dist" ship handlers/*.in instead of make results.
+ . Have "make dist" ship the FAQ.
+ . Install handlers as pkgdata_DATA, instead of their .in files.
documentation changes
. Document what features available to .sh jobs (Redmine #1558).
diff --git a/INSTALL b/INSTALL
index 2895b6e..e082f56 100644
--- a/INSTALL
+++ b/INSTALL
@@ -5,7 +5,7 @@ Requirements:
bash gawk
Recommended:
- rdiff-backup duplicity rsync gzip hwinfo sfdisk
+   rdiff-backup duplicity rsync gzip hwinfo sfdisk cryptsetup flashrom
Installation:
To install backupninja, simply do the following:
diff --git a/Makefile.am b/Makefile.am
index 3c50ad9..6e8d170 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,7 +1,7 @@
# vi: noexpandtab softtabstop=0
## Process this file with automake to produce Makefile.in
-EXTRA_DIST = README COPYING AUTHORS INSTALL NEWS ChangeLog \
+EXTRA_DIST = FAQ README COPYING AUTHORS INSTALL NEWS ChangeLog \
backupninja.spec backupninja.spec.in autogen.sh
SUBDIRS = etc examples handlers lib man src
diff --git a/configure.in b/configure.in
index 2d30687..612186b 100644
--- a/configure.in
+++ b/configure.in
@@ -3,7 +3,7 @@
# The maintainer mode is causing me grief with newest versions of autotools
#AM_MAINTAINER_MODE
-AC_INIT([backupninja],[0.9.10],[backupninja@lists.riseup.net])
+AC_INIT([backupninja],[1.0.1],[backupninja@lists.riseup.net])
AC_CONFIG_SRCDIR([src/backupninja.in])
AM_INIT_AUTOMAKE
diff --git a/examples/example.dup b/examples/example.dup
index 55ac565..067b6b1 100644
--- a/examples/example.dup
+++ b/examples/example.dup
@@ -56,7 +56,7 @@
## when set to yes, encryptkey variable must be set below; if you want to use
## two different keys for encryption and signing, you must also set the signkey
-## variable below.
+## variable (and probably signpassword) below.
## default is set to no, for backwards compatibility with backupninja <= 0.5.
##
## Default:
@@ -77,14 +77,23 @@
## Default:
# signkey =
-## password
-## NB: neither quote this, nor should it contain any quotes,
+## password used to unlock the encryption key
+## NB: neither quote this, nor should it contain any quotes,
## an example setting would be:
## password = a_very_complicated_passphrase
##
## Default:
# password =
+## password used to unlock the signature key, used only if
+## it differs from the encryption key
+## NB: neither quote this, nor should it contain any quotes,
+## an example setting would be:
+## signpassword = a_very_complicated_passphrase
+##
+## Default:
+# signpassword =
+
######################################################
## source section
## (where the files to be backed up are coming from)
@@ -226,8 +235,10 @@ exclude = /var/cache/backupninja/duplicity
## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
##
## duplicity >= 0.6.17
-## ------------------
-## supports only "-o IdentityFile=..."
+## -------------------
+## supports only "-oIdentityFile=..." since duplicity >= 0.6.17 uses paramiko,
+## an SSH Python module.
+## warning: no space is allowed between "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
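To make the contrast above concrete, here is a short sketch of the two spellings; the key path is the same illustrative one already used in the comments, not a required value:

   # older duplicity versions accepted a space after -o:
   sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
   # duplicity >= 0.6.17 (paramiko backend) needs the space removed:
   sshoptions = -oIdentityFile=/root/.ssh/id_rsa_duplicity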
diff --git a/examples/example.rdiff b/examples/example.rdiff
index e8ce542..323b3eb 100644
--- a/examples/example.rdiff
+++ b/examples/example.rdiff
@@ -53,6 +53,20 @@
## Default:
# ignore_version = no
+## should backupninja write program output as Info messages rather than Debug
+## messages? (default: no)
+## Usually rdiff-backup output (for increment expiration and backup) is written
+## to output as Debug messages; this option causes backupninja to use Info-level
+## messages instead. Since backup reports include Info messages, this option is
+## useful to receive output like rdiff-backup session statistics in reports. In
+## addition, since rdiff-backup has a habit of using a zero exit code when
+## non-fatal errors are encountered (causing backupninja to conclude the backup
+## was entirely successful), this option is useful for inspecting non-fatal
+## filesystem and permission errors from rdiff-backup.
+##
+## Default:
+# output_as_info = no
+
######################################################
## source section
## (where the files to be backed up are coming from)
diff --git a/examples/example.rsync b/examples/example.rsync
index a2795db..80365ae 100644
--- a/examples/example.rsync
+++ b/examples/example.rsync
@@ -36,9 +36,33 @@ backupdir = myserver
#tmp = /tmp
# specify backup storage format: short, long or mirror (i.e, no rotations)
+#
+# In the short format, incremental backups are rotated every day the handler
+# runs an by a finite number of times (backup.0, backup.1, backup.1, etc), so
+# if you want to have incremental backups for longer periods (like months) you
+# have to configure rotations for 30 or more using the "days" parameter at the
+# [general] section in the handler config.
+#
+# The short format is better described here:
+# http://www.mikerubel.org/computers/rsync_snapshots/#Incremental
+#
+# The long format is inspired by the maildir handler and allows keeping backups
+# for longer periods (weeks and months) using fewer rotations, as it stores
+# the increments in folders like daily.1, weekly.1, monthly.1 and has three
+# rotation parameters:
+#
+# keepdaily = number of daily backup increments
+# keepweekly = number of weekly backup increments
+# keepmonthly = number of monthly backup increments
+#
format = short
# for short storage format, specify the number of backup increments (min = 2, set to 1 or less to disable)
+#
+# Note that setting days = 0 is almost the same as using format = mirror except
+# that with the days config your backup gets a .0 suffix in the destination
+# folder, making it easier to turn it into an incremental backup later.
+#
days = 7
# for long storage format, specify the number of daily backup increments
@@ -50,11 +74,6 @@ days = 7
# for long storage format, specify the number of monthly backup increments
#keepmonthly = 1
-# use this if you need a lockfile to be kept during backup execution
-# this is an useful feature in case you have some tasks that should
-# know if the backup is running or not
-#lockfile =
-
# rsync command nice level
#nicelevel = 0
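As a sketch of the long format described in the comments above, a .rsync job would combine the rotation parameters like this; the values shown are simply the handler defaults from handlers/rsync.in, not a recommendation:

   format = long
   keepdaily = 5
   keepweekly = 3
   keepmonthly = 1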
diff --git a/examples/example.sys b/examples/example.sys
index fe34646..f1b5973 100644
--- a/examples/example.sys
+++ b/examples/example.sys
@@ -33,6 +33,15 @@
#
# (6) LVM metadata for every detected volume group, if "lvm = yes"
#
+# (7) a copy of each device's MBR, if "mbr = yes". A master boot record
+# (MBR) is the 512-byte boot sector that is the first sector of a
+#     partitioned data storage device such as a hard disk. To restore the MBR
+# one could do something like: dd if=sda.mbr of=/dev/sda
+# (MAKE SURE YOU PASS THE CORRECT DEVICE AS of= !!!)
+# WARNING: Restoring the MBR with a mismatching partition table will
+# make your data unreadable and nearly impossible to recover
+#
+# (8) a copy of the BIOS, if "bios = yes" and flashrom is installed
# here are the defaults, commented out:
@@ -65,6 +74,12 @@
# lvm = no
+# mbr = no
+
+# note: to backup your BIOS, you need the program 'flashrom' installed, and your
+# mainboard needs to be supported, see http://flashrom.org/Supported_hardware#Supported_mainboards
+# bios = no
+
# If vservers = yes in /etc/backupninja.conf then the following variables can
# be used:
# vsnames = all | <vserver1> <vserver2> ... (default = all)
diff --git a/handlers/Makefile.am b/handlers/Makefile.am
index 54155e8..bad53bb 100644
--- a/handlers/Makefile.am
+++ b/handlers/Makefile.am
@@ -3,16 +3,20 @@ HANDLERS = dup dup.helper maildir makecd \
makecd.helper mysql mysql.helper pgsql pgsql.helper rdiff \
rdiff.helper rsync sh svn sys sys.helper trac tar tar.helper
+DIST_HANDLERS = dup.in dup.helper.in maildir.in makecd.in \
+ makecd.helper.in mysql.in mysql.helper.in pgsql.in pgsql.helper.in rdiff.in \
+ rdiff.helper.in rsync.in sh.in svn.in sys.in sys.helper.in trac.in tar.in tar.helper.in wget
+
CLEANFILES = $(HANDLERS)
-EXTRA_DIST = Makefile.am $(HANDLERS)
+EXTRA_DIST = Makefile.am $(DIST_HANDLERS)
edit = sed \
-e "s,@BASH\@,$(BASH),g" \
-e "s,@AWK\@,$(AWK),g" \
-e "s,@SED\@,$(SED),g"
-dist_pkgdata_DATA = $(HANDLERS)
+pkgdata_DATA = $(HANDLERS)
dup: $(srcdir)/dup.in
rm -f dup
diff --git a/handlers/dsync.in b/handlers/dsync.in
new file mode 100644
index 0000000..db8e952
--- /dev/null
+++ b/handlers/dsync.in
@@ -0,0 +1,345 @@
+# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
+
+###############################################################
+#
+# This handler uses dovecot (version 2 or later) dsync backup
+# to backup mail to a remote server.
+#
+# Source and destination directories are typically configured
+# via the dovecot configuration, but can be overridden using
+# the settings here.
+#
+# If the configuration is set up with keepdaily at 3,
+# keepweekly at 2, and keepmonthly at 1, then each user's
+# maildir backup snapshot directory will contain these files:
+# daily.1
+# daily.2
+# daily.3
+# weekly.1
+# weekly.2
+# monthly.1
+#
+# The basic algorithm is to dsync each user individually,
+# and to use hard links for retaining historical data.
+#
+# For the backup rotation to work, destuser must be able to run
+# arbitrary bash commands on the desthost.
+#
+# If 'remove' is set to 'yes' (default), then any mail directory
+# which is deleted from the source will be moved to a "deleted"
+# directory in the destination. It is up to you to periodically
+# remove this directory or old maildirs in it.
+#
+# Limitations:
+# . because we are not dynamically looking up anything with
+# dovecot's userdb, we expect all data to be under the same
+# tree on both the source and destination
+#
+# . we are assuming a backup to a backup server, so the
+# destination host should have its dovecot mail_location
+# configured to put the mail into
+# $stripped_destdir/$letter/$user/$current_backup
+#
+##############################################################
+
+getconf rotate yes
+getconf remove yes
+getconf backup yes
+
+getconf keepdaily 5
+getconf keepweekly 3
+getconf keepmonthly 1
+
+getconf srcconffile
+getconf destconffile
+getconf srcdir
+getconf destdir
+getconf current_backup current_backup
+getconf desthost
+getconf destport 22
+getconf destuser
+getconf destid_file /root/.ssh/id_rsa
+getconf sshoptions
+
+failedcount=0
+
+# strip leading mailbox specifier (e.g. mdbox:, maildir:, etc)
+stripped_destdir=${destdir/*:/}
+stripped_srcdir=${srcdir/*:/}
+
+# strip trailing /
+destdir=${destdir%/}
+srcdir=${srcdir%/}
+
+if [ -n "$srcconffile" ]; then
+ srcconffile="-c $srcconffile"
+fi
+
+if [ -n "$destconffile" ]; then
+ destconffile="-c $destconffile"
+fi
+
+[ -d $stripped_srcdir ] || fatal "source directory $srcdir doesn't exist"
+
+
+##################################################################
+### FUNCTIONS
+
+function do_user() {
+ local user=$1
+ local btype=$2
+ local letter=${user:0:1}
+ local target="$stripped_destdir/$letter/$user/$btype.1"
+ local failedcount=0
+ local ret=0
+
+ debug "syncing"
+ while [ $failedcount -lt 3 ]; do
+ debug $DSYNC $testflags -u $user backup $srcconffile \
+ ssh -i $destid_file $destuser@$desthost $DSYNC $destconffile \
+ -u $user 2>&1
+      output=`$DSYNC $testflags -u $user backup $srcconffile \
+         ssh -i $destid_file $destuser@$desthost $DSYNC $destconffile \
+         -u $user 2>&1`
+      ret=$?
+ if [ $ret == 2 ]; then
+ # dsync needs to be run again
+ let "failedcount = failedcount + 1"
+ elif [ $ret == 0 ]; then
+         # things worked: record the snapshot time, then leave the loop
+         ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions "date +%c%n%s > $stripped_destdir/$letter/$user/$btype.1/created"
+         break
+ elif [ $ret != 0 ]; then
+ # things did not work in a good way, report it and try again
+ warning "dsync $user failed"
+ warning " returned: $ret"
+ let "failedcount = failedcount + 1"
+ fi
+
+      if [ $failedcount -ge 3 ]; then
+ warning "dsync failed 3 times for this user -- something is not working right. bailing out."
+ fi
+ done
+}
+
+# remove any maildirs from backup which might have been deleted
+# and add new ones which have just been created.
+# (actually, it just moves them to the directory "deleted")
+
+function do_remove() {
+ local tmp1=`maketemp dsync-tmp-file`
+ local tmp2=`maketemp dsync-tmp-file`
+
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost mkdir -p "${stripped_destdir}/deleted"
+ cd "$stripped_srcdir"
+ for userdir in `ls -d1 */`; do
+ ls -1 "$stripped_srcdir/$userdir" | sort > $tmp1
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost ls -1 "$stripped_destdir/$userdir" | sort > $tmp2
+ for deluser in `join -v 2 $tmp1 $tmp2`; do
+ [ "$deluser" != "" ] || continue
+ info "removing $destuser@$desthost:$stripped_destdir/$userdir$deluser/"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost mv "$stripped_destdir/$userdir$deluser/" "$stripped_destdir/deleted"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost "date +%c%n%s > '$stripped_destdir/deleted/$deluser/deleted_on'"
+ done
+ done
+ rm $tmp1
+ rm $tmp2
+}
+
+function do_rotate() {
+ [ "$rotate" == "yes" ] || return;
+ local user=$1
+ local letter=${user:0:1}
+ local backuproot="$stripped_destdir/$letter/$user"
+(
+ ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions <<EOF
+##### BEGIN REMOTE SCRIPT #####
+ seconds_daily=86400
+ seconds_weekly=604800
+ seconds_monthly=2628000
+ keepdaily=$keepdaily
+ keepweekly=$keepweekly
+ keepmonthly=$keepmonthly
+ now=\`date +%s\`
+
+ if [ ! -d "$backuproot" ]; then
+ echo "Debug: skipping rotate of $user. $backuproot doesn't exist."
+ exit
+ fi
+
+ for rottype in daily weekly monthly; do
+ seconds=\$((seconds_\${rottype}))
+
+ dir="$backuproot/\$rottype"
+ if [ ! -d \$dir.1 ]; then
+ echo "Debug: \$dir.1 does not exist, skipping."
+ continue 1
+ fi
+
+ # Rotate the current list of backups, if we can.
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ #echo "Debug: oldest \$oldest"
+ [ "\$oldest" == "" ] && oldest=0
+ for (( i=\$oldest; i > 0; i-- )); do
+ if [ -d \$dir.\$i ]; then
+ if [ -f \$dir.\$i/created ]; then
+ created=\`tail -1 \$dir.\$i/created\`
+ else
+ created=0
+ fi
+ cutoff_time=\$(( now - (seconds*(i-1)) ))
+ if [ ! \$created -gt \$cutoff_time ]; then
+ next=\$(( i + 1 ))
+ if [ ! -d \$dir.\$next ]; then
+ echo "Debug: \$rottype.\$i --> \$rottype.\$next"
+ mv \$dir.\$i \$dir.\$next
+ date +%c%n%s > \$dir.\$next/rotated
+ else
+ echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."
+ fi
+ else
+ echo "Debug: skipping rotation of \$dir.\$i because it was created" \$(( (now-created)/86400)) "days ago ("\$(( (now-cutoff_time)/86400))" needed)."
+ fi
+ fi
+ done
+ done
+
+ max=\$((keepdaily+1))
+ if [ \( \$keepweekly -gt 0 -a -d $backuproot/daily.\$max \) -a ! -d $backuproot/weekly.1 ]; then
+ echo "Debug: daily.\$max --> weekly.1"
+ mv $backuproot/daily.\$max $backuproot/weekly.1
+ date +%c%n%s > $backuproot/weekly.1/rotated
+ fi
+
+ max=\$((keepweekly+1))
+ if [ \( \$keepmonthly -gt 0 -a -d $backuproot/weekly.\$max \) -a ! -d $backuproot/monthly.1 ]; then
+ echo "Debug: weekly.\$max --> monthly.1"
+ mv $backuproot/weekly.\$max $backuproot/monthly.1
+ date +%c%n%s > $backuproot/monthly.1/rotated
+ fi
+
+ for rottype in daily weekly monthly; do
+ max=\$((keep\${rottype}+1))
+ dir="$backuproot/\$rottype"
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ [ "\$oldest" == "" ] && oldest=0
+ # if we've rotated the last backup off the stack, remove it.
+ for (( i=\$oldest; i >= \$max; i-- )); do
+ if [ -d \$dir.\$i ]; then
+ if [ -d $backuproot/rotate.tmp ]; then
+ echo "Debug: removing rotate.tmp"
+ rm -rf $backuproot/rotate.tmp
+ fi
+ echo "Debug: moving \$rottype.\$i to rotate.tmp"
+ mv \$dir.\$i $backuproot/rotate.tmp
+ fi
+ done
+ done
+####### END REMOTE SCRIPT #######
+EOF
+) | (while read a; do passthru $a; done)
+
+}
+
+
+function setup_remote_dirs() {
+ local user=$1
+ local backuptype=$2
+ local letter=${user:0:1}
+ local dir="$stripped_destdir/$letter/$user/$backuptype"
+ local tmpdir="$stripped_destdir/$letter/$user/rotate.tmp"
+(
+ ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions <<EOF
+ if [ ! -d $stripped_destdir ]; then
+ echo "Fatal: Destination directory $stripped_destdir does not exist on host $desthost."
+ exit 1
+ elif [ -d $dir.1 ]; then
+ if [ -f $dir.1/created ]; then
+ echo "Warning: $dir.1 already exists. Overwriting contents."
+ else
+ echo "Warning: we seem to be resuming a partially written $dir.1"
+ fi
+ else
+ if [ -d $tmpdir ]; then
+ mv $tmpdir $dir.1
+ if [ \$? == 1 ]; then
+ echo "Fatal: could not mv $stripped_destdir/rotate.tmp $dir.1 on host $desthost"
+ exit 1
+ fi
+ else
+ mkdir --parents $dir.1
+ if [ \$? == 1 ]; then
+ echo "Fatal: could not create directory $dir.1 on host $desthost"
+ exit 1
+ fi
+ fi
+ if [ -d $dir.2 ]; then
+ echo "Debug: update links $backuptype.2 --> $backuptype.1"
+ cp -alf $dir.2/. $dir.1
+ #if [ \$? == 1 ]; then
+ # echo "Fatal: could not create hard links to $dir.1 on host $desthost"
+ # exit 1
+ #fi
+ fi
+ fi
+ [ -f $dir.1/created ] && rm $dir.1/created
+ [ -f $dir.1/rotated ] && rm $dir.1/rotated
+ exit 0
+EOF
+) | (while read a; do passthru $a; done)
+
+ if [ $? == 1 ]; then exit; fi
+}
+
+###
+##################################################################
+
+# see if we can login
+debug "ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions 'echo -n 1'"
+if [ ! $test ]; then
+ result=`ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions 'echo -n 1' 2>&1`
+ if [ "$result" != "1" ]; then
+ fatal "Can't connect to $desthost as $destuser using $destid_file."
+ fi
+fi
+
+## SANITY CHECKS ##
+status=`ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost "[ -d \"$stripped_destdir\" ] && echo 'ok'"`
+if [ "$status" != "ok" ]; then
+ fatal "Destination directory $stripped_destdir doesn't exist!"
+ exit
+fi
+
+### REMOVE OLD MAILDIRS ###
+
+if [ "$remove" == "yes" ]; then
+ do_remove
+fi
+
+### MAKE BACKUPS ###
+
+if [ "$backup" == "yes" ]; then
+ if [ $keepdaily -gt 0 ]; then btype=daily
+ elif [ $keepweekly -gt 0 ]; then btype=weekly
+ elif [ $keepmonthly -gt 0 ]; then btype=monthly
+ else fatal "keeping no backups"; fi
+
+ if [ "$testuser" != "" ]; then
+      cd "$stripped_srcdir/${testuser:0:1}"
+ do_rotate $testuser
+ setup_remote_dirs $testuser $btype
+ do_user $testuser $btype
+ else
+ [ -d "$stripped_srcdir" ] || fatal "directory $stripped_srcdir not found."
+ for user in `@DOVEADM@ user \* | cut -d@ -f1`
+ do
+ debug $user
+ [ "$user" != "" ] || continue
+ do_rotate $user
+ setup_remote_dirs $user $btype
+ do_user $user $btype
+ done
+ fi
+fi
+
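The new handler ships without an example job file, so here is a minimal, hypothetical /etc/backup.d/30.dsync sketch built only from the getconf defaults it declares above; the host, user and mail paths are placeholders, not values from the source:

   when = everyday at 02
   srcdir = maildir:/var/vmail
   destdir = maildir:/var/backups/vmail
   desthost = backup.example.org
   destuser = vmailbackup
   destid_file = /root/.ssh/id_rsa
   keepdaily = 5
   keepweekly = 3
   keepmonthly = 1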
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index ea794c9..6f3281e 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -193,7 +193,7 @@ do_dup_gpg_signkey() {
}
do_dup_gpg_passphrase() {
- local question="Enter the passphrase needed to unlock the GnuPG key:"
+ local question="Enter the passphrase needed to unlock the GnuPG encryption key:"
REPLY=
while [ -z "$REPLY" -o -z "$dup_gpg_password" ]; do
passwordBox "$dup_title - GnuPG" "$question"
@@ -202,6 +202,16 @@ do_dup_gpg_passphrase() {
done
}
+do_dup_gpg_sign_passphrase() {
+ local question="Enter the passphrase needed to unlock the GnuPG signature key:"
+ REPLY=
+ while [ -z "$REPLY" -o -z "$dup_gpg_signpassword" ]; do
+ passwordBox "$dup_title - GnuPG" "$question"
+ [ $? = 0 ] || return 1
+ dup_gpg_signpassword="$REPLY"
+ done
+}
+
do_dup_gpg() {
# symmetric or public key encryption ?
@@ -226,6 +236,9 @@ do_dup_gpg() {
# a passphrase is alway needed
do_dup_gpg_passphrase
+ # If the signature key differs, we also need a passphrase for it
+ [ -n "$dup_gpg_signkey" -a -n "$dup_gpg_encryptkey" -a "$dup_gpg_signkey" != "$dup_gpg_encryptkey" ] && do_dup_gpg_sign_passphrase
+
_gpg_done="(DONE)"
setDefault adv
# TODO: replace the above line by the following when do_dup_conn is written
@@ -329,10 +342,19 @@ encryptkey = $dup_gpg_encryptkey
# if not set, encryptkey will be used.
signkey = $dup_gpg_signkey
-# password
-# NB: neither quote this, nor should it include any quotes
+## password used to unlock the encryption key
+## NB: neither quote this, nor should it contain any quotes,
+## an example setting would be:
+## password = a_very_complicated_passphrase
password = $dup_gpg_password
+## password used to unlock the signature key, used only if
+## it differs from the encryption key
+## NB: neither quote this, nor should it contain any quotes,
+## an example setting would be:
+## signpassword = a_very_complicated_passphrase
+signpassword = $dup_gpg_signpassword
+
######################################################
## source section
## (where the files to be backed up are coming from)
@@ -482,8 +504,10 @@ bandwidthlimit = $dup_bandwidth
## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
##
## duplicity >= 0.6.17
-## ------------------
-## supports only "-o IdentityFile=..."
+## -------------------
+## supports only "-oIdentityFile=..." since duplicity >= 0.6.17 uses paramiko,
+## an SSH Python module.
+## warning: no space is allowed between "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
@@ -582,6 +606,7 @@ dup_wizard() {
dup_gpg_onekeypair="yes"
dup_gpg_signkey=""
dup_gpg_password=""
+ dup_gpg_signpassword=""
dup_nicelevel=19
dup_testconnect=yes
dup_options=
diff --git a/handlers/dup.in b/handlers/dup.in
index 9eb2fbb..3c586c6 100644
--- a/handlers/dup.in
+++ b/handlers/dup.in
@@ -12,6 +12,7 @@ getconf tmpdir
setsection gpg
getconf password
+getconf signpassword
getconf sign no
getconf encryptkey
getconf signkey
@@ -46,6 +47,7 @@ destdir=${destdir%/}
[ -n "$desturl" -o -n "$destdir" ] || fatal "The destination directory (destdir) must be set when desturl is not used."
[ -n "$include" -o -n "$vsinclude" ] || fatal "No source includes specified"
[ -n "$password" ] || fatal "The password option must be set."
+[ -z "$signkey" -o -z "$encryptkey" -o "$signkey" == "$encryptkey" -o -n "$signpassword" ] || fatal "The signpassword option must be set because signkey is different from encryptkey."
if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "s3+http" ]; then
[ -n "$awsaccesskeyid" -a -n "$awssecretaccesskey" ] || fatal "AWS access keys must be set for S3 backups."
fi
@@ -55,6 +57,13 @@ fi
if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "ftp" ]; then
[ -n "$ftp_password" ] || fatal "ftp_password must be set for FTP backups."
fi
+if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "file" ]; then
+ if [ ! -e "`echo $desturl | @AWK@ -F '://' '{print $2}'`" ]; then
+ fatal "The destination directory ($desturl) does not exist."
+ elif [ ! -d "`echo $desturl | @AWK@ -F '://' '{print $2}'`" ]; then
+ fatal "The destination ($desturl) is not a directory."
+ fi
+fi
### VServers
# If vservers are configured, check that the ones listed in $vsnames do exist.
@@ -95,7 +104,7 @@ fi
### COMMAND-LINE MANGLING ######################################################
### initialize $execstr*
-execstr_precmd=
+execstr_precmd='LC_ALL=C'
execstr_command=
execstr_options="$options --no-print-statistics"
execstr_source=
@@ -110,16 +119,21 @@ fi
### duplicity version (ignore anything else than 0-9 and ".")
duplicity_version="`duplicity --version | @AWK@ '{print $2}' | @SED@ 's/[^.[:digit:]]//g'`"
-duplicity_major="`echo $duplicity_version | @AWK@ -F '.' '{print $1}'`"
-duplicity_minor="`echo $duplicity_version | @AWK@ -F '.' '{print $2}'`"
-duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`"
### ssh/scp/sftp options (duplicity < 0.4.3 is unsupported)
## duplicity >= 0.6.17 : paramiko backend
-if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -ge 17 ]; then
+if version_ge "$duplicity_version" '0.6.17'; then
if [ -n "$sshoptions" ]; then
- echo "$sshoptions" | grep -Eqs '^-o[[:space:]]*IdentityFile=[^ ]+$' \
- || warning 'duplicity >= 0.6.17 only supports the IdentityFile SSH option'
+ if echo "$sshoptions" | grep -Eqs '^-o[[:space:]]*IdentityFile=[^ ]+$' ; then
+ spaceless_sshoptions="$(echo -n "$sshoptions" | @SED@ 's/^-o[[:space:]]*/-o/')"
+ if [ "$spaceless_sshoptions" != "$sshoptions" ] ; then
+ warning 'Since duplicity >= 0.6.17, sshoptions option requires no space between -o and IdentityFile.'
+ warning 'The bad space has been ignored. Update your duplicity handler config file to suppress this message.'
+ sshoptions="$spaceless_sshoptions"
+ fi
+ else
+ warning 'duplicity >= 0.6.17 only supports the IdentityFile SSH option'
+ fi
fi
execstr_options="${execstr_options} --ssh-options '$sshoptions'"
if [ "$bandwidthlimit" != 0 ]; then
@@ -176,7 +190,9 @@ else
fi
### Cleanup options
-execstr_options="${execstr_options} --extra-clean"
+if ! version_ge "$duplicity_version" '0.6.20'; then
+ execstr_options="${execstr_options} --extra-clean"
+fi
### Temporary directory
if [ -n "$tmpdir" ]; then
@@ -269,6 +285,7 @@ fi
debug "$execstr_precmd duplicity cleanup --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
@@ -288,6 +305,7 @@ if [ "$keep" != "yes" ]; then
debug "$execstr_precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
@@ -306,10 +324,11 @@ fi
# remove-all-inc-of-but-n-full : remove increments of older full backups : only keep latest ones
if [ "$keep" != "yes" ]; then
if [ "$keepincroffulls" != "all" ]; then
- if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -ge 10 ]; then
+ if version_ge "$duplicity_version" '0.6.10'; then
debug "$execstr_precmd duplicity remove-all-inc-of-but-n-full $keepincroffulls --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
@@ -332,6 +351,7 @@ debug "$execstr_precmd duplicity $execstr_command $execstr_options $execstr_sour
if [ ! $test ]; then
outputfile=`maketemp backupout`
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
diff --git a/handlers/maildir.in b/handlers/maildir.in
index 912c0e6..64ac987 100644
--- a/handlers/maildir.in
+++ b/handlers/maildir.in
@@ -85,9 +85,6 @@ if [ $test ]; then
testflags="--dry-run -v"
fi
-rsyncflags="$testflags -e 'ssh -p $destport -i $destid_file $sshoptions' -r -v --ignore-existing --delete --size-only --bwlimit=$speedlimit"
-excludes="--exclude '.Trash/\*' --exclude '.Mistakes/\*' --exclude '.Spam/\*'"
-
##################################################################
### FUNCTIONS
diff --git a/handlers/mysql.in b/handlers/mysql.in
index 65deebb..ceed620 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -256,10 +256,15 @@ then
for db in $databases
do
DUMP_BASE="$MYSQLDUMP $defaultsfile $sqldumpoptions"
- if [ "$db" = "information_schema" ] || [ "$db" = "performance_schema" ]
- then
- DUMP_BASE="${DUMP_BASE} --skip-lock-tables"
- fi
+
+ case "$db" in
+ information_schema)
+ DUMP_BASE="${DUMP_BASE} --skip-lock-tables"
+ ;;
+ performance_schema)
+ DUMP_BASE="${DUMP_BASE} --skip-lock-tables --skip-events"
+ ;;
+ esac
# Dumping structure and data
DUMP="$DUMP_BASE $ignore $db"
@@ -271,7 +276,7 @@ then
DUMP_STRUCT="$DUMP_BASE --no-data $db"
for qualified_table in $nodata
do
- table=$( expr match "$qualified_table" "$db\.\([^\w]*\)" )
+ table=$( expr match "$qualified_table" "$db\.\(.\+\)" )
DUMP_STRUCT="$DUMP_STRUCT $table"
done
DUMP="( $DUMP; $DUMP_STRUCT )"
@@ -297,7 +302,7 @@ then
if [ "$compress" == "yes" ]; then
execstr="$DUMP | $GZIP $GZIP_OPTS > '$dumpdir/${db}.sql.gz'"
else
- execstr="$DUMP -r '$dumpdir/${db}.sql'"
+ execstr="$DUMP > '$dumpdir/${db}.sql'"
fi
fi
debug "su $user -c \"$execstr\""
diff --git a/handlers/rdiff.helper.in b/handlers/rdiff.helper.in
index 1597305..e2ddc46 100644
--- a/handlers/rdiff.helper.in
+++ b/handlers/rdiff.helper.in
@@ -124,7 +124,7 @@ do_rdiff_dest() {
REPLY=
while [ -z "$REPLY" -o -z "$rdiff_directory" -o -z "$rdiff_host" -o -z "$rdiff_user" ]
do
- formBegin "$rdiff_title - destination: last three items are required"
+ formBegin "$rdiff_title - destination"
formItem "keep" "$rdiff_keep"
formItem "dest_directory" "$rdiff_directory"
formItem "dest_host" "$rdiff_host"
@@ -282,12 +282,33 @@ do_rdiff_con() {
setDefault finish
}
+do_rdiff_adv() {
+ booleanBox "$rdiff_title" "Should backupninja write program output as Info messages rather than Debug messages?" no
+ if [ $? = 0 ]; then
+ rdiff_output_as_info=yes
+ else
+ rdiff_output_as_info=no
+ fi
+}
+
do_rdiff_finish() {
get_next_filename $configdirectory/90.rdiff
cat > $next_filename <<EOF
# options = --force
# when = everyday at 02
+## should backupninja write program output as Info messages rather than Debug
+## messages? (default: no)
+## Usually rdiff-backup output (for increment expiration and backup) is written
+## to output as Debug messages; this option causes backupninja to use Info-level
+## messages instead. Since backup reports include Info messages, this option is
+## useful to receive output like rdiff-backup session statistics in reports. In
+## addition, since rdiff-backup has a habit of using a zero exit code when
+## non-fatal errors are encountered (causing backupninja to conclude the backup
+## was entirely successful), this option is useful for inspecting non-fatal
+## filesystem and permission errors from rdiff-backup.
+output_as_info = $rdiff_output_as_info
+
[source]
type = local
keep = $rdiff_keep
@@ -378,6 +399,7 @@ rdiff_main_menu() {
src "$srcitem" \
dest "$destitem" \
conn "$conitem" \
+ adv "$advitem" \
finish "finish and create config file"
[ $? = 0 ] || return
result="$REPLY"
@@ -411,6 +433,7 @@ rdiff_wizard() {
rdiff_type=remote
rdiff_user=
rdiff_host=
+ rdiff_output_as_info="no"
# Global variables whose '*' shall not be expanded
set -o noglob
diff --git a/handlers/rdiff.in b/handlers/rdiff.in
index c3c8d1d..471a3d7 100644
--- a/handlers/rdiff.in
+++ b/handlers/rdiff.in
@@ -85,6 +85,7 @@ getconf testconnect yes
getconf nicelevel 0
getconf bwlimit
getconf ignore_version no
+getconf output_as_info no
setsection source
getconf type; sourcetype=$type
@@ -182,7 +183,11 @@ if [ "$keep" != yes ]; then
if [ $test = 0 ]; then
output="`su -c "$removestr" 2>&1`"
if [ $? = 0 ]; then
- debug $output
+ if [ "$output_as_info" == "yes" ]; then
+ info $output
+ else
+ debug $output
+ fi
info "Removing backups older than $keep days succeeded."
else
warning $output
@@ -268,7 +273,11 @@ debug "$execstr"
if [ $test = 0 ]; then
output=`nice -n $nicelevel su -c "$execstr" 2>&1`
if [ $? = 0 ]; then
- debug $output
+ if [ "$output_as_info" == "yes" ]; then
+ info $output
+ else
+ debug $output
+ fi
info "Successfully finished backing up source $label"
else
error $output
diff --git a/handlers/rsync.in b/handlers/rsync.in
index e624e6f..13399c9 100644
--- a/handlers/rsync.in
+++ b/handlers/rsync.in
@@ -39,7 +39,6 @@
# keepdaily = for long storage format, specify the number of daily backup increments
# keepweekly = for long storage format, specify the number of weekly backup increments
# keepmonthly = for long storage format, specify the number of monthly backup increments
-# lockfile = lockfile to be kept during backup execution
# nicelevel = rsync command nice level
# enable_mv_timestamp_bug = set to "yes" if your system isnt handling timestamps correctly
# tmp = temp folder
@@ -129,7 +128,6 @@ function eval_config {
getconf keepdaily 5
getconf keepweekly 3
getconf keepmonthly 1
- getconf lockfile
getconf nicelevel 0
getconf enable_mv_timestamp_bug no
getconf tmp /tmp
@@ -277,10 +275,15 @@ function eval_config {
mv=move_files
fi
- for path in $exclude; do
- excludes="$excludes --exclude=$path"
+ set -o noglob
+ SAVEIFS=$IFS
+ IFS=$(echo -en "\n\b")
+ for i in $exclude; do
+ str="${i//__star__/*}"
+ excludes="${excludes} --exclude='$str'"
done
-
+ IFS=$SAVEIFS
+ set +o noglob
}
function rotate_short {
@@ -391,8 +394,8 @@ function rotate_long {
local metadata
if [ ! -d "$backuproot" ]; then
- echo "Debug: skipping rotate of $backuproot as it doesn't exist."
- exit
+ warning "Skipping rotate of $backuproot as it doesn't exist."
+ return
fi
for rottype in daily weekly monthly; do
@@ -405,12 +408,12 @@ function rotate_long {
echo "Debug: $dir.1 does not exist, skipping."
continue 1
elif [ ! -f $metadata.1/created ] && [ ! -f $metadata.1/rotated ]; then
- echo "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
+            warning "Metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
# Rotate the current list of backups, if we can.
- oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1`
+ oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
[ "$oldest" == "" ] && oldest=0
for (( i=$oldest; i > 0; i-- )); do
if [ -d $dir.$i ]; then
@@ -421,11 +424,16 @@ function rotate_long {
else
created=0
fi
+ # Validate created date
+ if [ -z "$created" ] || echo $created | grep -v -q -e '^[0-9]*$'; then
+ warning "Invalid metadata $created. Skipping rotation."
+ break
+ fi
cutoff_time=$(( now - (seconds*(i-1)) ))
if [ ! $created -gt $cutoff_time ]; then
next=$(( i + 1 ))
if [ ! -d $dir.$next ]; then
- echo "Debug: $rottype.$i --> $rottype.$next"
+ debug "$rottype.$i --> $rottype.$next"
$nice mv $dir.$i $dir.$next
mkdir -p $metadata.$next
date +%c%n%s > $metadata.$next/rotated
@@ -433,10 +441,10 @@ function rotate_long {
$nice mv $metadata.$i/created $metadata.$next
fi
else
- echo "Debug: skipping rotation of $dir.$i because $dir.$next already exists."
+ debug "skipping rotation of $dir.$i because $dir.$next already exists."
fi
else
- echo "Debug: skipping rotation of $dir.$i because it was created" $(( (now-created)/86400)) "days ago ("$(( (now-cutoff_time)/86400))" needed)."
+ debug "skipping rotation of $dir.$i because it was created" $(( (now-created)/86400)) "days ago ("$(( (now-cutoff_time)/86400))" needed)."
fi
fi
done
@@ -444,7 +452,7 @@ function rotate_long {
max=$((keepdaily+1))
if [ $keepweekly -gt 0 -a -d $backuproot/daily.$max -a ! -d $backuproot/weekly.1 ]; then
- echo "Debug: daily.$max --> weekly.1"
+ debug "daily.$max --> weekly.1"
$nice mv $backuproot/daily.$max $backuproot/weekly.1
mkdir -p $backuproot/metadata/weekly.1
# Update increment folder date and setup metadata
@@ -457,7 +465,7 @@ function rotate_long {
max=$((keepweekly+1))
if [ $keepmonthly -gt 0 -a -d $backuproot/weekly.$max -a ! -d $backuproot/monthly.1 ]; then
- echo "Debug: weekly.$max --> monthly.1"
+ debug "weekly.$max --> monthly.1"
$nice mv $backuproot/weekly.$max $backuproot/monthly.1
mkdir -p $backuproot/metadata/monthly.1
# Update increment folder date and setup metadata
@@ -471,16 +479,16 @@ function rotate_long {
for rottype in daily weekly monthly; do
max=$((keep${rottype}+1))
dir="$backuproot/$rottype"
- oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1`
+ oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
[ "$oldest" == "" ] && oldest=0
# if we've rotated the last backup off the stack, remove it.
for (( i=$oldest; i >= $max; i-- )); do
if [ -d $dir.$i ]; then
if [ -d $backuproot/rotate.tmp ]; then
- echo "Debug: removing rotate.tmp"
+ debug "removing rotate.tmp"
$nice rm -rf $backuproot/rotate.tmp
fi
- echo "Debug: moving $rottype.$i to rotate.tmp"
+ debug "moving $rottype.$i to rotate.tmp"
$nice mv $dir.$i $backuproot/rotate.tmp
fi
done
@@ -513,7 +521,7 @@ function rotate_long_remote {
now=\`date +%s\`
if [ ! -d "$backuproot" ]; then
- echo "Debug: skipping rotate of $backuproot as it doesn't exist."
+ echo "Fatal: skipping rotate of $backuproot as it doesn't exist."
exit
fi
@@ -532,7 +540,7 @@ function rotate_long_remote {
fi
# Rotate the current list of backups, if we can.
- oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
[ "\$oldest" == "" ] && oldest=0
for (( i=\$oldest; i > 0; i-- )); do
if [ -d \$dir.\$i ]; then
@@ -543,6 +551,11 @@ function rotate_long_remote {
else
created=0
fi
+ # Validate created date
+ if [ -z "\$created" ] || echo \$created | grep -v -q -e '^[0-9]*$'; then
+ echo "Warning: Invalid metadata \$created. Skipping rotation."
+ break
+ fi
cutoff_time=\$(( now - (seconds*(i-1)) ))
if [ ! \$created -gt \$cutoff_time ]; then
next=\$(( i + 1 ))
@@ -593,7 +606,7 @@ function rotate_long_remote {
for rottype in daily weekly monthly; do
max=\$((keep\${rottype}+1))
dir="$backuproot/\$rottype"
- oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
[ "\$oldest" == "" ] && oldest=0
# if we've rotated the last backup off the stack, remove it.
for (( i=\$oldest; i >= \$max; i-- )); do
@@ -838,7 +851,7 @@ function update_metadata {
# finished, otherwise daily rotations might not take place.
# If we used backup end time, in the next handler run
# we might not have $now - $created >= 24:00
- echo $starttime > $metadata/created
+ echo "$starttime" > $metadata/created
$touch $backupdir/$SECTION/$suffix
else
folder="`echo $dest_path | cut -d : -f 2`"
@@ -852,7 +865,7 @@ function update_metadata {
# finished, otherwise daily rotations might not take place.
# If we used backup end time, in the next handler run
# we might not have $now - $created >= 24:00
- echo $starttime > $metadata/created
+ echo "$starttime" > $metadata/created
##### END REMOTE SCRIPT #######
EOF
) | (while read a; do passthru $a; done)
@@ -883,45 +896,6 @@ function test_connect {
}
-function set_lockfile {
-
- if [ ! -z "$lockfile" ]; then
- mkdir -p `dirname $lockfile`
- if ( set -o noclobber; echo "$$" > "$lockfile" ) &> /dev/null; then
- trap 'unset_lockfile' INT TERM EXIT
- else
- fatal "Could not create lockfile $lockfile, exiting"
- fi
- fi
-
-}
-
-function unset_lockfile {
-
- if [ ! -z "$lockfile" ]; then
- $rm -f $lockfile || warning "Could not remove lockfile $lockfile"
- fi
-
-}
-
-function check_lockfile {
-
- local pid process
-
- if [ ! -z "$lockfile" ] && [ -f "$lockfile" ]; then
- pid="`cat $lockfile`"
- process="`ps --no-headers -o comm $pid`"
- if [ "$?" == "0" ] && [ "`ps --no-headers -o comm $$`" == "$process" ]; then
- info "Another backup is running for $lockfile, skipping run"
- exit
- else
- info "Found old lockfile $lockfile, removing it"
- unset_lockfile
- fi
- fi
-
-}
-
function set_filelist {
filelist_flag=""
@@ -1108,18 +1082,58 @@ function end_mux {
}
+function set_pipefail {
+
+ # Save initial pipefail status for later restoration
+ if echo "$SHELLOPTS" | grep -q ":pipefail"; then
+ pipefail="-o"
+ else
+ pipefail="+o"
+ fi
+
+ # Ensure that a non-zero rsync exit status is caught by our handler
+ set -o pipefail
+
+}
+
+function restore_pipefail {
+
+ if [ ! -z "$pipefail" ]; then
+ set $pipefail pipefail
+ fi
+
+}
+
+function check_rsync_exit_status {
+
+ if [ -z "$1" ]; then
+ return
+ fi
+
+ case $1 in
+ 0)
+ return
+ ;;
+ 1|2|3|4|5|6|10|11|12|13|14|21)
+ fatal "Rsync error $1 when trying to transfer $SECTION"
+ ;;
+ *)
+ warning "Rsync error $1 when trying to transfer $SECTION"
+ ;;
+ esac
+
+}
+
# the backup procedure
eval_config
-check_lockfile
-set_lockfile
set_rsync_options
start_mux
stop_services
mount_rw
starttime="`date +%c%n%s`"
-echo "Starting backup at `echo $starttime | head -n 1`" >> $log
+echo "Starting backup at `echo "$starttime" | head -n 1`" >> $log
for SECTION in $include; do
@@ -1130,13 +1144,13 @@ for SECTION in $include; do
set_dest
info "Syncing $SECTION on $dest_path..."
- debug $nice $rsync "${rsync_options[@]}" $filelist_flag $excludes $batch_option $orig $dest_path
- $nice $rsync "${rsync_options[@]}" $filelist_flag $excludes $batch_option $orig $dest_path | tee -a $log
-
- if [ "$?" != "0" ]; then
- fatal "Rsync error when trying to transfer $SECTION"
- fi
+ command="$rsync ${rsync_options[@]} --delete-excluded $filelist_flag $excludes $batch_option $orig $dest_path"
+ debug $nice su -c "$command"
+ set_pipefail
+ $nice su -c "$command" | tee -a $log
+ check_rsync_exit_status $?
+ restore_pipefail
update_metadata
done
@@ -1144,7 +1158,6 @@ done
mount_ro
run_fsck
start_services
-unset_lockfile
end_mux
echo "Finnishing backup at `date`" >> $log
diff --git a/handlers/sys.helper.in b/handlers/sys.helper.in
index 8a2fb07..f728f51 100644
--- a/handlers/sys.helper.in
+++ b/handlers/sys.helper.in
@@ -20,6 +20,8 @@ sys_wizard() {
hardware="hardware = no"
luksheaders="luksheaders = no"
lvm="lvm = no"
+ mbr="mbr = no"
+ bios="bios = no"
for opt in $result; do
case $opt in
'"packages"') packages="packages = yes";;
@@ -28,6 +30,8 @@ sys_wizard() {
'"hardware"') hardware="hardware = yes";;
'"luksheaders"') luksheaders="luksheaders = yes";;
'"lvm"') lvm="lvm = yes";;
+ '"mbr"') mbr="mbr = yes";;
+ '"bios"') bios="bios = yes";;
esac
done
get_next_filename $configdirectory/10.sys
@@ -38,6 +42,8 @@ $sfdisk
$hardware
$luksheaders
$lvm
+$mbr
+$bios
# packagesfile = /var/backups/dpkg-selections.txt
# selectionsfile = /var/backups/debconfsel.txt
@@ -51,4 +57,3 @@ $lvm
EOF
chmod 600 $next_filename
}
-
diff --git a/handlers/sys.in b/handlers/sys.in
index 74133a3..605e583 100644
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -25,14 +25,23 @@
# (4) hardware information.
# write to a text file the important things which hwinfo can gleen.
#
-# (5) the Luks header of every Luks block device, if option luksheaders
+# (5) the LUKS header of every LUKS block device, if option luksheaders
# is enabled.
-# in case you (have to) scramble such a Luks header (for some time),
+# in case you (have to) scramble such a LUKS header (for some time),
# and restore it later by running "dd if=luksheader.sda2.bin of=/dev/sda2"
# (MAKE SURE YOU PASS THE CORRECT DEVICE AS of= !!!)
#
# (6) LVM metadata for every detected volume group, if "lvm = yes"
#
+# (7) a copy of each device's MBR, if "mbr = yes". A master boot record
+# (MBR) is the 512-byte boot sector that is the first sector of a
+#     partitioned data storage device such as a hard disk. To restore the MBR
+# one could do something like: dd if=sda.mbr of=/dev/sda
+# (MAKE SURE YOU PASS THE CORRECT DEVICE AS of= !!!)
+# WARNING: Restoring the MBR with a mismatching partition table will
+# make your data unreadable and nearly impossible to recover
+#
+# (8) a copy of the BIOS, if "bios = yes" and flashrom is installed
if [ -f /etc/debian_version ]
then
@@ -44,6 +53,11 @@ then
os=redhat
debug "Redhat detected"
osversion="/etc/redhat-release"
+elif [ -f /etc/SuSE-release ]
+then
+ os=suse
+ debug "SuSE detected"
+ osversion="/etc/SuSE-release"
else
warning "Unknown OS detected!"
fi
@@ -57,14 +71,14 @@ if [ ! -d $parentdir ]; then
mkdir -p $parentdir
fi
-if [ $os = "debian" ]
+if [ "$os" = "debian" ]
then
getconf packagesfile $parentdir/dpkg-selections.txt
getconf packagemgr `which dpkg`
getconf packagemgroptions ' --get-selections *'
getconf selectionsfile $parentdir/debconfsel.txt
getconf debconfgetselections `which debconf-get-selections`
-elif [ $os = "redhat" ]
+elif [ "$os" = "redhat" ]
then
getconf packagesfile $parentdir/rpmpackages.txt
getconf packagemgr `which rpm`
@@ -72,6 +86,11 @@ then
getconf SYSREPORT `which sysreport`
getconf sysreport_options ' -norpm '
+elif [ "$os" = "suse" ]
+then
+ getconf packagesfile $parentdir/rpmpackages.txt
+ getconf packagemgr `which rpm`
+ getconf packagemgroptions ' -qa '
else
getconf packagesfile $parentdir/unknownOS.txt
fi
@@ -100,6 +119,12 @@ getconf VGS `which vgs`
getconf VGCFGBACKUP `which vgcfgbackup`
getconf lvm no
+getconf mbr no
+getconf mbrfile $parentdir/mbr.__star__.bin
+
+getconf FLASHROM `which flashrom`
+getconf bios no
+
getconf vsnames all
# If vservers are configured, check that the ones listed in $vsnames are running.
@@ -119,11 +144,11 @@ fi
if [ "$luksheaders" == "yes" ]; then
if [ ! -x "$DD" ]; then
- warning "can't find dd, skipping backup of Luks headers."
+ warning "can't find dd, skipping backup of LUKS headers."
luksheaders="no"
fi
if [ ! -x "$CRYPTSETUP" ]; then
- warning "can't find cryptsetup, skipping backup of Luks headers."
+ warning "can't find cryptsetup, skipping backup of LUKS headers."
luksheaders="no"
fi
fi
@@ -139,6 +164,20 @@ if [ "$lvm" == "yes" ]; then
fi
fi
+if [ "$mbr" == "yes" ]; then
+ if [ ! -x "$DD" ]; then
+ warning "can't find dd, skipping backup of MBR."
+ mbr="no"
+ fi
+fi
+
+if [ "$bios" == "yes" ]; then
+ if [ ! -x "$FLASHROM" ]; then
+ warning "can't find flashrom, skipping backup of BIOS."
+      bios="no"
+ fi
+fi
+
## PACKAGES ##############################
#
@@ -348,13 +387,15 @@ echo "Getting information about the kernel."
echo
STATUS="Getting kernel version:"
catifexec "/bin/uname" "-a"
-STATUS="Checking module information:"
-catifexec "/sbin/lsmod"
-for x in $(/sbin/lsmod | /usr/bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
-) ; do
- STATUS="Checking module information $x:"
- catifexec "/sbin/modinfo" "$x"
-done
+if [ "$hardware" == "yes" ]; then
+ STATUS="Checking module information:"
+ catifexec "/sbin/lsmod"
+ for x in $(/sbin/lsmod | /usr/bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
+ ) ; do
+ STATUS="Checking module information $x:"
+ catifexec "/sbin/modinfo" "$x"
+ done
+fi
STATUS="Gathering information about your filesystems:"
catiffile "/proc/filesystems"
@@ -432,9 +473,11 @@ catiffile "/proc/rtc"
STATUS="Gathering information about your ide drivers:"
catiffile "/proc/ide"
-STATUS="Gathering information about your bus:"
-catifexec "/usr/bin/lspci"
-catiffile "/proc/bus"
+if [ "$hardware" == "yes" ]; then
+ STATUS="Gathering information about your bus:"
+ catifexec "/usr/bin/lspci"
+ catiffile "/proc/bus"
+fi
echo
echo "Getting disk and filesystem information."
@@ -591,8 +634,10 @@ fi
if [ "$luksheaders" == "yes" ]; then
devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
[ -n "$devices" ] || warning "No block device found"
+ partitions=`LC_ALL=C $SFDISK -l 2>/dev/null |grep "^/dev" | @AWK@ '{print $1}'`
+ [ -n "$partitions" ] || warning "No partitions found"
targetdevices=""
- for dev in $devices; do
+ for dev in $devices $partitions; do
[ -b $dev ] || continue
debug "$CRYPTSETUP isLuks $dev"
$CRYPTSETUP isLuks $dev
@@ -603,26 +648,45 @@ if [ "$luksheaders" == "yes" ]; then
label=${label//\//-}
outputfile=${luksheadersfile//__star__/$label}
# the following sizes are expressed in terms of 512-byte sectors
- debug "Let us find out the Luks header size for $dev"
+ debug "Let us find out the LUKS header size for $dev"
debug "$CRYPTSETUP luksDump \"$dev\" | grep '^Payload offset:' | @AWK@ '{print $3}'"
headersize=`$CRYPTSETUP luksDump "$dev" | grep '^Payload offset:' | @AWK@ '{print $3}'`
if [ $? -ne 0 ]; then
- warning "Could not compute the size of Luks header, skipping device $dev"
+ warning "Could not compute the size of LUKS header, skipping $dev"
continue
- elif [ -z "$headersize" -o -n "`echo \"$headersize\" | sed 's/[0-9]*//g'`" ]; then
- warning "The computed size of Luks header is not an integer, skipping device $dev"
+ elif [ -z "$headersize" -o -n "`echo \"$headersize\" | @SED@ 's/[0-9]*//g'`" ]; then
+ warning "The computed size of LUKS header is not an integer, skipping $dev"
continue
fi
- debug "Let us backup the Luks header of device $dev"
+ debug "Let us backup the LUKS header of $dev"
debug "$DD if=\"${dev}\" of=\"${outputfile}\" bs=512 count=\"${headersize}\""
output=`$DD if="${dev}" of="${outputfile}" bs=512 count="${headersize}" 2>&1`
exit_code=$?
if [ $exit_code -eq 0 ]; then
debug $output
- info "The Luks header of $dev was saved to $outputfile."
+ info "The LUKS header of $dev was saved to $outputfile."
else
debug $output
- fatal "The Luks header of $dev could not be saved."
+ fatal "The LUKS header of $dev could not be saved."
+ fi
+ done
+fi
+
+if [ "$mbr" == "yes" ]; then
+ devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
+ if [ "$devices" == "" ]; then
+      warning "No hard disks found"
+ fi
+ for dev in $devices; do
+ debug "Will try to backup MBR tables for device $dev"
+ [ -b $dev ] || continue
+ label=${dev#/dev/}
+ label=${label//\//-}
+ outputfile=${mbrfile//__star__/$label}
+ debug "$DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null"
+ $DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null
+ if [ $? -ne 0 ]; then
+ warning "The MBR for $dev could not be saved."
fi
done
fi
@@ -686,3 +750,14 @@ if [ "$lvm" == "yes" ]; then
;;
esac
fi
+
+## BIOS ####################################
+
+if [ "$bios" == "yes" ]; then
+ debug "Trying to backup BIOS"
+ debug "$FLASHROM -r ${parentdir}/bios --programmer internal >/dev/null 2>&1"
+ $FLASHROM -r ${parentdir}/bios --programmer internal >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ warning "The BIOS could not be saved."
+ fi
+fi
diff --git a/handlers/tar.helper.in b/handlers/tar.helper.in
index 4a483be..91fec34 100644
--- a/handlers/tar.helper.in
+++ b/handlers/tar.helper.in
@@ -25,7 +25,8 @@ tar_wizard() {
"none" "do not filter trough" off \
"compress" "filter trough compress" off \
"gzip" "filter trough gzip" off \
- "bzip" "filter trough bzip" on
+ "bzip" "filter trough bzip" on \
+ "xz" "filter trough xz" off
[ $? = 1 ] && return;
result="$REPLY"
tar_compress="compress = $REPLY "
diff --git a/handlers/tar.in b/handlers/tar.in
index b4f8c58..7497306 100644
--- a/handlers/tar.in
+++ b/handlers/tar.in
@@ -48,6 +48,10 @@ case $compress in
compress_option="-j"
EXTENSION="tar.bz2"
;;
+ "xz")
+ compress_option="-J"
+ EXTENSION="tar.xz"
+ ;;
"none")
compress_option=""
;;
@@ -68,11 +72,13 @@ debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes
+if [ ! $test ]; then
$TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes \
> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.list \
2> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.err
+fi
[ $? -ne 0 ] && fatal "Tar backup failed"
diff --git a/lib/tools.in b/lib/tools.in
index 929826b..55c2650 100644
--- a/lib/tools.in
+++ b/lib/tools.in
@@ -15,6 +15,44 @@ function maketemp() {
echo $tempfile
}
+#
+# compare version numbers.
+# returns 0 if equal, 1 if $1>$2, and 2 if $1<$2
+#
+
+function compare_versions() {
+ if [[ "$1" == "$2" ]]; then
+ return 0
+ fi
+ local IFS=.
+ local i version_1=($1) version_2=($2)
+ for ((i=${#version_1[@]}; i<${#version_2[@]}; i++)); do
+ version_1[i]=0
+ done
+ for ((i=0; i<${#version_1[@]}; i++)); do
+ if [[ -z ${version_2[i]} ]]; then
+ version_2[i]=0
+ fi
+ if ((10#${version_1[i]} > 10#${version_2[i]})); then
+ return 1
+ fi
+ if ((10#${version_1[i]} < 10#${version_2[i]})); then
+ return 2
+ fi
+ done
+ return 0
+}
+
+#
+# compare version numbers: >=
+#
+
+function version_ge() {
+ compare_versions "$1" "$2"
+ comp=$?
+ [ $comp -eq 0 ] || [ $comp -eq 1 ]
+}
+
#####################################################
## CONFIG-FILE RELATED FUNCTIONS
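Usage sketch for the helpers added above; the first call is illustrative, and the second mirrors how handlers/dup.in now uses version_ge:

   compare_versions "0.6.17" "0.6.9"    # returns 1, i.e. the first version is newer
   if version_ge "$duplicity_version" '0.6.17'; then
      : # e.g. switch to the paramiko-style --ssh-options handling
   fi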
diff --git a/src/backupninja.in b/src/backupninja.in
index 0400828..0901742 100755
--- a/src/backupninja.in
+++ b/src/backupninja.in
@@ -326,32 +326,32 @@ function process_action() {
# start locked section : avoid concurrent execution of the same backup
# uses a construct specific to shell scripts with flock. See man flock for details
{
- debug "executing handler in locked section controlled by $lockfile"
- flock -x -w 5 200
- # if all is good, we acquired the lock
- if [ $? -eq 0 ]; then
-
- let "actions_run += 1"
-
- # call the handler:
- echo_debug_msg=1
- (
- . $scriptdirectory/$suffix $file
- ) 2>&1 | (
- while read a; do
- echo $a >> $bufferfile
- [ $debug ] && colorize "$a"
- done
- )
- retcode=$?
- # ^^^^^^^^ we have a problem! we can't grab the return code "$?". grrr.
- echo_debug_msg=0
-
- else
- # a backup is probably ongoing already, so display an error message
- debug "failed to acquire lock"
- echo "Fatal: Could not acquire lock $lockfile. A backup is probably already running for $file." >>$bufferfile
- fi
+ debug "executing handler in locked section controlled by $lockfile"
+ flock -x -w 5 200
+ # if all is good, we acquired the lock
+ if [ $? -eq 0 ]; then
+
+ let "actions_run += 1"
+
+ # call the handler:
+ echo_debug_msg=1
+ (
+ . $scriptdirectory/$suffix $file
+ ) 2>&1 | (
+ while read a; do
+ echo $a >> $bufferfile
+ [ $debug ] && colorize "$a"
+ done
+ )
+ retcode=$?
+ # ^^^^^^^^ we have a problem! we can't grab the return code "$?". grrr.
+ echo_debug_msg=0
+
+ else
+ # a backup is probably ongoing already, so display an error message
+ debug "failed to acquire lock $lockfile"
+ echo "Fatal: Could not acquire lock $lockfile. A backup is probably already running for $file." >>$bufferfile
+ fi
} 200> $lockfile
# end of locked section
@@ -508,6 +508,8 @@ getconf PGSQLUSER postgres
getconf GZIP /bin/gzip
getconf GZIP_OPTS --rsyncable
getconf RSYNC /usr/bin/rsync
+getconf DSYNC /usr/bin/dsync
+getconf DOVEADM /usr/bin/doveadm
getconf admingroup root
# initialize vservers support
@@ -590,7 +592,7 @@ if [ $doit == 1 ]; then
{
for ((i=0; i < ${#messages[@]} ; i++)); do
- echo ${messages[$i]}
+ echo ${messages[$i]}
done
echo -e "$errormsg"
if [ "$reportspace" == "yes" ]; then