Diffstat (limited to 'handlers')
-rw-r--r--  handlers/Makefile.am        8
-rw-r--r--  handlers/dsync.in         345
-rw-r--r--  handlers/dup.helper.in     35
-rw-r--r--  handlers/dup.in            38
-rw-r--r--  handlers/maildir.in         4
-rw-r--r--  handlers/mysql.in          17
-rw-r--r--  handlers/rdiff.helper.in   25
-rw-r--r--  handlers/rdiff.in          13
-rw-r--r--  handlers/rsync.in         171
-rw-r--r--  handlers/sys.helper.in      7
-rw-r--r--  handlers/sys.in           123
-rw-r--r--  handlers/tar.helper.in      3
-rw-r--r--  handlers/tar.in             6
13 files changed, 670 insertions(+), 125 deletions(-)
diff --git a/handlers/Makefile.am b/handlers/Makefile.am
index 54155e8..bad53bb 100644
--- a/handlers/Makefile.am
+++ b/handlers/Makefile.am
@@ -3,16 +3,20 @@ HANDLERS = dup dup.helper maildir makecd \
makecd.helper mysql mysql.helper pgsql pgsql.helper rdiff \
rdiff.helper rsync sh svn sys sys.helper trac tar tar.helper
+DIST_HANDLERS = dup.in dup.helper.in maildir.in makecd.in \
+ makecd.helper.in mysql.in mysql.helper.in pgsql.in pgsql.helper.in rdiff.in \
+ rdiff.helper.in rsync.in sh.in svn.in sys.in sys.helper.in trac.in tar.in tar.helper.in wget
+
CLEANFILES = $(HANDLERS)
-EXTRA_DIST = Makefile.am $(HANDLERS)
+EXTRA_DIST = Makefile.am $(DIST_HANDLERS)
edit = sed \
-e "s,@BASH\@,$(BASH),g" \
-e "s,@AWK\@,$(AWK),g" \
-e "s,@SED\@,$(SED),g"
-dist_pkgdata_DATA = $(HANDLERS)
+pkgdata_DATA = $(HANDLERS)
dup: $(srcdir)/dup.in
rm -f dup
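
# For context: the "edit" sed macro above is what turns each *.in template into
# an installed handler. "make dup" therefore ends up running roughly the
# following (tool paths are illustrative, not the literal Makefile rule):
rm -f dup
sed -e "s,@BASH\@,/bin/bash,g" \
    -e "s,@AWK\@,/usr/bin/awk,g" \
    -e "s,@SED\@,/bin/sed,g" \
    dup.in > dup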
diff --git a/handlers/dsync.in b/handlers/dsync.in
new file mode 100644
index 0000000..db8e952
--- /dev/null
+++ b/handlers/dsync.in
@@ -0,0 +1,345 @@
+# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
+
+###############################################################
+#
+# This handler uses the dovecot (version 2 or later) dsync backup
+# command to back up mail to a remote server.
+#
+# Source and destination directories are typically configured
+# via the dovecot configuration, but can be overridden using
+# the settings here.
+#
+# If the configuration is set up with keepdaily at 3,
+# keepweekly at 2, and keepmonthly at 1, then each user's
+# maildir backup snapshot directory will contain these snapshots:
+# daily.1
+# daily.2
+# daily.3
+# weekly.1
+# weekly.2
+# monthly.1
+#
+# The basic algorithm is to dsync each user individually,
+# and to use hard links for retaining historical data.
+#
+# For the backup rotation to work, destuser must be able to run
+# arbitrary bash commands on the desthost.
+#
+# If 'remove' is set to 'yes' (default), then any mail directory
+# which is deleted from the source will be moved to a "deleted"
+# directory in the destination. It is up to you to periodically
+# remove this directory or old maildirs in it.
+#
+# Limitations:
+# . because we are not dynamically looking up anything with
+# dovecot's userdb, we expect all data to be under the same
+# tree on both the source and destination
+#
+# . we are assuming a backup to a backup server, so the
+# destination host should have its dovecot mail_location
+# configured to put the mail into
+# $stripped_destdir/$letter/$user/$current_backup
+#
+##############################################################
+
+getconf rotate yes
+getconf remove yes
+getconf backup yes
+
+getconf keepdaily 5
+getconf keepweekly 3
+getconf keepmonthly 1
+
+getconf srcconffile
+getconf destconffile
+getconf srcdir
+getconf destdir
+getconf current_backup current_backup
+getconf desthost
+getconf destport 22
+getconf destuser
+getconf destid_file /root/.ssh/id_rsa
+getconf sshoptions
+
+failedcount=0
+
+# strip the leading mailbox format specifier (e.g. mdbox:, maildir:)
+stripped_destdir=${destdir/*:/}
+stripped_srcdir=${srcdir/*:/}
+
+# strip trailing /
+destdir=${destdir%/}
+srcdir=${srcdir%/}
+
+if [ -n "$srcconffile" ]; then
+ srcconffile="-c $srcconffile"
+fi
+
+if [ -n "$destconffile" ]; then
+ destconffile="-c $destconffile"
+fi
+
+[ -d "$stripped_srcdir" ] || fatal "source directory $srcdir doesn't exist"
+
+
+##################################################################
+### FUNCTIONS
+
+function do_user() {
+ local user=$1
+ local btype=$2
+ local letter=${user:0:1}
+ local target="$stripped_destdir/$letter/$user/$btype.1"
+ local failedcount=0
+ local ret=0
+
+ debug "syncing"
+ while [ $failedcount -lt 3 ]; do
+ debug $DSYNC $testflags -u $user backup $srcconffile \
+ ssh -i $destid_file $destuser@$desthost $DSYNC $destconffile \
+ -u $user 2>&1
+      output=`$DSYNC $testflags -u $user backup $srcconffile \
+         ssh -i $destid_file $destuser@$desthost $DSYNC $destconffile \
+         -u $user 2>&1`
+      ret=$?
+      if [ $ret == 2 ]; then
+         # dsync needs to be run again
+         let "failedcount = failedcount + 1"
+      elif [ $ret == 0 ]; then
+         # record when this snapshot was made, then leave the loop
+         ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions "date +%c%n%s > $stripped_destdir/$letter/$user/$btype.1/created"
+         break
+      elif [ $ret != 0 ]; then
+         # things did not work in a good way, report it and try again
+         warning "dsync $user failed (exit status $ret):"
+         warning "$output"
+         let "failedcount = failedcount + 1"
+ fi
+
+      if [ $failedcount -ge 3 ]; then
+         warning "dsync failed 3 times for user $user -- something is not working right. Giving up on this user."
+      fi
+ done
+}
+
+# Remove from the backup any maildirs whose source has been deleted.
+# (Actually, they are not removed but moved to the "deleted" directory
+# on the destination.)
+
+function do_remove() {
+ local tmp1=`maketemp dsync-tmp-file`
+ local tmp2=`maketemp dsync-tmp-file`
+
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost mkdir -p "${stripped_destdir}/deleted"
+ cd "$stripped_srcdir"
+ for userdir in `ls -d1 */`; do
+ ls -1 "$stripped_srcdir/$userdir" | sort > $tmp1
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost ls -1 "$stripped_destdir/$userdir" | sort > $tmp2
+ for deluser in `join -v 2 $tmp1 $tmp2`; do
+ [ "$deluser" != "" ] || continue
+ info "removing $destuser@$desthost:$stripped_destdir/$userdir$deluser/"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost mv "$stripped_destdir/$userdir$deluser/" "$stripped_destdir/deleted"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost "date +%c%n%s > '$stripped_destdir/deleted/$deluser/deleted_on'"
+ done
+ done
+ rm $tmp1
+ rm $tmp2
+}
+
+function do_rotate() {
+ [ "$rotate" == "yes" ] || return;
+ local user=$1
+ local letter=${user:0:1}
+ local backuproot="$stripped_destdir/$letter/$user"
+(
+ ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions <<EOF
+##### BEGIN REMOTE SCRIPT #####
+ seconds_daily=86400
+ seconds_weekly=604800
+ seconds_monthly=2628000
+ keepdaily=$keepdaily
+ keepweekly=$keepweekly
+ keepmonthly=$keepmonthly
+ now=\`date +%s\`
+
+ if [ ! -d "$backuproot" ]; then
+ echo "Debug: skipping rotate of $user. $backuproot doesn't exist."
+ exit
+ fi
+
+ for rottype in daily weekly monthly; do
+ seconds=\$((seconds_\${rottype}))
+
+ dir="$backuproot/\$rottype"
+ if [ ! -d \$dir.1 ]; then
+ echo "Debug: \$dir.1 does not exist, skipping."
+ continue 1
+ fi
+
+ # Rotate the current list of backups, if we can.
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ #echo "Debug: oldest \$oldest"
+ [ "\$oldest" == "" ] && oldest=0
+ for (( i=\$oldest; i > 0; i-- )); do
+ if [ -d \$dir.\$i ]; then
+ if [ -f \$dir.\$i/created ]; then
+ created=\`tail -1 \$dir.\$i/created\`
+ else
+ created=0
+ fi
+ cutoff_time=\$(( now - (seconds*(i-1)) ))
+ if [ ! \$created -gt \$cutoff_time ]; then
+ next=\$(( i + 1 ))
+ if [ ! -d \$dir.\$next ]; then
+ echo "Debug: \$rottype.\$i --> \$rottype.\$next"
+ mv \$dir.\$i \$dir.\$next
+ date +%c%n%s > \$dir.\$next/rotated
+ else
+ echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."
+ fi
+ else
+ echo "Debug: skipping rotation of \$dir.\$i because it was created" \$(( (now-created)/86400)) "days ago ("\$(( (now-cutoff_time)/86400))" needed)."
+ fi
+ fi
+ done
+ done
+
+ max=\$((keepdaily+1))
+ if [ \( \$keepweekly -gt 0 -a -d $backuproot/daily.\$max \) -a ! -d $backuproot/weekly.1 ]; then
+ echo "Debug: daily.\$max --> weekly.1"
+ mv $backuproot/daily.\$max $backuproot/weekly.1
+ date +%c%n%s > $backuproot/weekly.1/rotated
+ fi
+
+ max=\$((keepweekly+1))
+ if [ \( \$keepmonthly -gt 0 -a -d $backuproot/weekly.\$max \) -a ! -d $backuproot/monthly.1 ]; then
+ echo "Debug: weekly.\$max --> monthly.1"
+ mv $backuproot/weekly.\$max $backuproot/monthly.1
+ date +%c%n%s > $backuproot/monthly.1/rotated
+ fi
+
+ for rottype in daily weekly monthly; do
+ max=\$((keep\${rottype}+1))
+ dir="$backuproot/\$rottype"
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ [ "\$oldest" == "" ] && oldest=0
+ # if we've rotated the last backup off the stack, remove it.
+ for (( i=\$oldest; i >= \$max; i-- )); do
+ if [ -d \$dir.\$i ]; then
+ if [ -d $backuproot/rotate.tmp ]; then
+ echo "Debug: removing rotate.tmp"
+ rm -rf $backuproot/rotate.tmp
+ fi
+ echo "Debug: moving \$rottype.\$i to rotate.tmp"
+ mv \$dir.\$i $backuproot/rotate.tmp
+ fi
+ done
+ done
+####### END REMOTE SCRIPT #######
+EOF
+) | (while read a; do passthru $a; done)
+
+}
+
+
+function setup_remote_dirs() {
+ local user=$1
+ local backuptype=$2
+ local letter=${user:0:1}
+ local dir="$stripped_destdir/$letter/$user/$backuptype"
+ local tmpdir="$stripped_destdir/$letter/$user/rotate.tmp"
+(
+ ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions <<EOF
+ if [ ! -d $stripped_destdir ]; then
+ echo "Fatal: Destination directory $stripped_destdir does not exist on host $desthost."
+ exit 1
+ elif [ -d $dir.1 ]; then
+ if [ -f $dir.1/created ]; then
+ echo "Warning: $dir.1 already exists. Overwriting contents."
+ else
+ echo "Warning: we seem to be resuming a partially written $dir.1"
+ fi
+ else
+ if [ -d $tmpdir ]; then
+ mv $tmpdir $dir.1
+ if [ \$? == 1 ]; then
+            echo "Fatal: could not mv $tmpdir to $dir.1 on host $desthost"
+ exit 1
+ fi
+ else
+ mkdir --parents $dir.1
+ if [ \$? == 1 ]; then
+ echo "Fatal: could not create directory $dir.1 on host $desthost"
+ exit 1
+ fi
+ fi
+ if [ -d $dir.2 ]; then
+ echo "Debug: update links $backuptype.2 --> $backuptype.1"
+ cp -alf $dir.2/. $dir.1
+ #if [ \$? == 1 ]; then
+ # echo "Fatal: could not create hard links to $dir.1 on host $desthost"
+ # exit 1
+ #fi
+ fi
+ fi
+ [ -f $dir.1/created ] && rm $dir.1/created
+ [ -f $dir.1/rotated ] && rm $dir.1/rotated
+ exit 0
+EOF
+) | (while read a; do passthru $a; done)
+
+ if [ $? == 1 ]; then exit; fi
+}
+
+###
+##################################################################
+
+# see if we can login
+debug "ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions 'echo -n 1'"
+if [ ! $test ]; then
+ result=`ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions 'echo -n 1' 2>&1`
+ if [ "$result" != "1" ]; then
+ fatal "Can't connect to $desthost as $destuser using $destid_file."
+ fi
+fi
+
+## SANITY CHECKS ##
+status=`ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost "[ -d \"$stripped_destdir\" ] && echo 'ok'"`
+if [ "$status" != "ok" ]; then
+ fatal "Destination directory $stripped_destdir doesn't exist!"
+ exit
+fi
+
+### REMOVE OLD MAILDIRS ###
+
+if [ "$remove" == "yes" ]; then
+ do_remove
+fi
+
+### MAKE BACKUPS ###
+
+if [ "$backup" == "yes" ]; then
+ if [ $keepdaily -gt 0 ]; then btype=daily
+ elif [ $keepweekly -gt 0 ]; then btype=weekly
+ elif [ $keepmonthly -gt 0 ]; then btype=monthly
+ else fatal "keeping no backups"; fi
+
+ if [ "$testuser" != "" ]; then
+      cd "$stripped_srcdir/${testuser:0:1}"
+ do_rotate $testuser
+ setup_remote_dirs $testuser $btype
+ do_user $testuser $btype
+ else
+ [ -d "$stripped_srcdir" ] || fatal "directory $stripped_srcdir not found."
+ for user in `@DOVEADM@ user \* | cut -d@ -f1`
+ do
+ debug $user
+ [ "$user" != "" ] || continue
+ do_rotate $user
+ setup_remote_dirs $user $btype
+ do_user $user $btype
+ done
+ fi
+fi
+
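
# A minimal sketch of the hard-link snapshot scheme the dsync handler relies on
# (the paths and user below are made up; the handler derives them from destdir,
# the first letter of the user name, and doveadm):
base=/srv/backup/mail/j/jdoe                 # $stripped_destdir/$letter/$user
mkdir -p "$base/daily.1"
cp -alf "$base/daily.2/." "$base/daily.1"    # hard-link yesterday's files instead of copying them
dsync -u jdoe backup ssh -i /root/.ssh/id_rsa backup@backuphost dsync -u jdoe
date +%c%n%s > "$base/daily.1/created"       # timestamp that do_rotate reads later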
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index ea794c9..6f3281e 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -193,7 +193,7 @@ do_dup_gpg_signkey() {
}
do_dup_gpg_passphrase() {
- local question="Enter the passphrase needed to unlock the GnuPG key:"
+ local question="Enter the passphrase needed to unlock the GnuPG encryption key:"
REPLY=
while [ -z "$REPLY" -o -z "$dup_gpg_password" ]; do
passwordBox "$dup_title - GnuPG" "$question"
@@ -202,6 +202,16 @@ do_dup_gpg_passphrase() {
done
}
+do_dup_gpg_sign_passphrase() {
+ local question="Enter the passphrase needed to unlock the GnuPG signature key:"
+ REPLY=
+ while [ -z "$REPLY" -o -z "$dup_gpg_signpassword" ]; do
+ passwordBox "$dup_title - GnuPG" "$question"
+ [ $? = 0 ] || return 1
+ dup_gpg_signpassword="$REPLY"
+ done
+}
+
do_dup_gpg() {
# symmetric or public key encryption ?
@@ -226,6 +236,9 @@ do_dup_gpg() {
# a passphrase is alway needed
do_dup_gpg_passphrase
+ # If the signature key differs, we also need a passphrase for it
+ [ -n "$dup_gpg_signkey" -a -n "$dup_gpg_encryptkey" -a "$dup_gpg_signkey" != "$dup_gpg_encryptkey" ] && do_dup_gpg_sign_passphrase
+
_gpg_done="(DONE)"
setDefault adv
# TODO: replace the above line by the following when do_dup_conn is written
@@ -329,10 +342,19 @@ encryptkey = $dup_gpg_encryptkey
# if not set, encryptkey will be used.
signkey = $dup_gpg_signkey
-# password
-# NB: neither quote this, nor should it include any quotes
+## password used to unlock the encryption key
+## NB: neither quote this, nor should it contain any quotes,
+## an example setting would be:
+## password = a_very_complicated_passphrase
password = $dup_gpg_password
+## password used to unlock the signature key, used only if
+## it differs from the encryption key
+## NB: neither quote this, nor should it contain any quotes,
+## an example setting would be:
+## signpassword = a_very_complicated_passphrase
+signpassword = $dup_gpg_signpassword
+
######################################################
## source section
## (where the files to be backed up are coming from)
@@ -482,8 +504,10 @@ bandwidthlimit = $dup_bandwidth
## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
##
## duplicity >= 0.6.17
-## ------------------
-## supports only "-o IdentityFile=..."
+## -------------------
+## supports only "-oIdentityFile=..." since duplicity >= 0.6.17 uses paramiko,
+## an SSH Python module.
+## warning: there must be no space between "-o" and "IdentityFile=...".
##
## Default:
# sshoptions =
@@ -582,6 +606,7 @@ dup_wizard() {
dup_gpg_onekeypair="yes"
dup_gpg_signkey=""
dup_gpg_password=""
+ dup_gpg_signpassword=""
dup_nicelevel=19
dup_testconnect=yes
dup_options=
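
# Sketch of how the two passphrases collected above reach duplicity through the
# environment (the key ids and URL are made up; the real export happens in dup.in):
export PASSPHRASE='passphrase_for_the_encryption_key'
export SIGN_PASSPHRASE='passphrase_for_the_signature_key'
duplicity full --encrypt-key 01234567 --sign-key 89ABCDEF /etc scp://backup@backuphost//srv/dup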
diff --git a/handlers/dup.in b/handlers/dup.in
index 9eb2fbb..3c586c6 100644
--- a/handlers/dup.in
+++ b/handlers/dup.in
@@ -12,6 +12,7 @@ getconf tmpdir
setsection gpg
getconf password
+getconf signpassword
getconf sign no
getconf encryptkey
getconf signkey
@@ -46,6 +47,7 @@ destdir=${destdir%/}
[ -n "$desturl" -o -n "$destdir" ] || fatal "The destination directory (destdir) must be set when desturl is not used."
[ -n "$include" -o -n "$vsinclude" ] || fatal "No source includes specified"
[ -n "$password" ] || fatal "The password option must be set."
+[ -z "$signkey" -o -z "$encryptkey" -o "$signkey" == "$encryptkey" -o -n "$signpassword" ] || fatal "The signpassword option must be set because signkey is different from encryptkey."
if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "s3+http" ]; then
[ -n "$awsaccesskeyid" -a -n "$awssecretaccesskey" ] || fatal "AWS access keys must be set for S3 backups."
fi
@@ -55,6 +57,13 @@ fi
if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "ftp" ]; then
[ -n "$ftp_password" ] || fatal "ftp_password must be set for FTP backups."
fi
+if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "file" ]; then
+ if [ ! -e "`echo $desturl | @AWK@ -F '://' '{print $2}'`" ]; then
+ fatal "The destination directory ($desturl) does not exist."
+ elif [ ! -d "`echo $desturl | @AWK@ -F '://' '{print $2}'`" ]; then
+ fatal "The destination ($desturl) is not a directory."
+ fi
+fi
### VServers
# If vservers are configured, check that the ones listed in $vsnames do exist.
@@ -95,7 +104,7 @@ fi
### COMMAND-LINE MANGLING ######################################################
### initialize $execstr*
-execstr_precmd=
+execstr_precmd='LC_ALL=C'
execstr_command=
execstr_options="$options --no-print-statistics"
execstr_source=
@@ -110,16 +119,21 @@ fi
### duplicity version (ignore anything else than 0-9 and ".")
duplicity_version="`duplicity --version | @AWK@ '{print $2}' | @SED@ 's/[^.[:digit:]]//g'`"
-duplicity_major="`echo $duplicity_version | @AWK@ -F '.' '{print $1}'`"
-duplicity_minor="`echo $duplicity_version | @AWK@ -F '.' '{print $2}'`"
-duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`"
### ssh/scp/sftp options (duplicity < 0.4.3 is unsupported)
## duplicity >= 0.6.17 : paramiko backend
-if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -ge 17 ]; then
+if version_ge "$duplicity_version" '0.6.17'; then
if [ -n "$sshoptions" ]; then
- echo "$sshoptions" | grep -Eqs '^-o[[:space:]]*IdentityFile=[^ ]+$' \
- || warning 'duplicity >= 0.6.17 only supports the IdentityFile SSH option'
+ if echo "$sshoptions" | grep -Eqs '^-o[[:space:]]*IdentityFile=[^ ]+$' ; then
+ spaceless_sshoptions="$(echo -n "$sshoptions" | @SED@ 's/^-o[[:space:]]*/-o/')"
+ if [ "$spaceless_sshoptions" != "$sshoptions" ] ; then
+         warning 'Since duplicity >= 0.6.17, the sshoptions option must not contain a space between -o and IdentityFile.'
+         warning 'The extra space has been removed. Update your duplicity handler config file to suppress this message.'
+ sshoptions="$spaceless_sshoptions"
+ fi
+ else
+ warning 'duplicity >= 0.6.17 only supports the IdentityFile SSH option'
+ fi
fi
execstr_options="${execstr_options} --ssh-options '$sshoptions'"
if [ "$bandwidthlimit" != 0 ]; then
@@ -176,7 +190,9 @@ else
fi
### Cleanup options
-execstr_options="${execstr_options} --extra-clean"
+if ! version_ge "$duplicity_version" '0.6.20'; then
+ execstr_options="${execstr_options} --extra-clean"
+fi
### Temporary directory
if [ -n "$tmpdir" ]; then
@@ -269,6 +285,7 @@ fi
debug "$execstr_precmd duplicity cleanup --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
@@ -288,6 +305,7 @@ if [ "$keep" != "yes" ]; then
debug "$execstr_precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
@@ -306,10 +324,11 @@ fi
# remove-all-inc-of-but-n-full : remove increments of older full backups : only keep latest ones
if [ "$keep" != "yes" ]; then
if [ "$keepincroffulls" != "all" ]; then
- if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -ge 10 ]; then
+ if version_ge "$duplicity_version" '0.6.10'; then
debug "$execstr_precmd duplicity remove-all-inc-of-but-n-full $keepincroffulls --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
@@ -332,6 +351,7 @@ debug "$execstr_precmd duplicity $execstr_command $execstr_options $execstr_sour
if [ ! $test ]; then
outputfile=`maketemp backupout`
export PASSPHRASE=$password
+ export SIGN_PASSPHRASE=$signpassword
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
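
# version_ge used above is assumed to be provided by backupninja's shared
# functions; a stand-in with the same contract (return 0 when $1 >= $2, plain
# dotted version strings only) could look like this:
version_ge() {
   [ "$(printf '%s\n%s\n' "$2" "$1" | sort -V | head -n 1)" = "$2" ]
}
version_ge "0.6.21" "0.6.17" && echo "new enough for the paramiko ssh backend"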
diff --git a/handlers/maildir.in b/handlers/maildir.in
index 148c30d..64ac987 100644
--- a/handlers/maildir.in
+++ b/handlers/maildir.in
@@ -57,7 +57,6 @@ getconf rotate yes
getconf remove yes
getconf backup yes
-getconf loadlimit 5
getconf speedlimit 0
getconf keepdaily 5
getconf keepweekly 3
@@ -86,9 +85,6 @@ if [ $test ]; then
testflags="--dry-run -v"
fi
-rsyncflags="$testflags -e 'ssh -p $destport -i $destid_file $sshoptions' -r -v --ignore-existing --delete --size-only --bwlimit=$speedlimit"
-excludes="--exclude '.Trash/\*' --exclude '.Mistakes/\*' --exclude '.Spam/\*'"
-
##################################################################
### FUNCTIONS
diff --git a/handlers/mysql.in b/handlers/mysql.in
index 65deebb..ceed620 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -256,10 +256,15 @@ then
for db in $databases
do
DUMP_BASE="$MYSQLDUMP $defaultsfile $sqldumpoptions"
- if [ "$db" = "information_schema" ] || [ "$db" = "performance_schema" ]
- then
- DUMP_BASE="${DUMP_BASE} --skip-lock-tables"
- fi
+
+ case "$db" in
+ information_schema)
+ DUMP_BASE="${DUMP_BASE} --skip-lock-tables"
+ ;;
+ performance_schema)
+ DUMP_BASE="${DUMP_BASE} --skip-lock-tables --skip-events"
+ ;;
+ esac
# Dumping structure and data
DUMP="$DUMP_BASE $ignore $db"
@@ -271,7 +276,7 @@ then
DUMP_STRUCT="$DUMP_BASE --no-data $db"
for qualified_table in $nodata
do
- table=$( expr match "$qualified_table" "$db\.\([^\w]*\)" )
+ table=$( expr match "$qualified_table" "$db\.\(.\+\)" )
DUMP_STRUCT="$DUMP_STRUCT $table"
done
DUMP="( $DUMP; $DUMP_STRUCT )"
@@ -297,7 +302,7 @@ then
if [ "$compress" == "yes" ]; then
execstr="$DUMP | $GZIP $GZIP_OPTS > '$dumpdir/${db}.sql.gz'"
else
- execstr="$DUMP -r '$dumpdir/${db}.sql'"
+ execstr="$DUMP > '$dumpdir/${db}.sql'"
fi
fi
debug "su $user -c \"$execstr\""
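
# Illustration of the nodata table-name extraction fixed above (values are made up):
db=drupal; qualified_table=drupal.watchdog
expr match "$qualified_table" "$db\.\(.\+\)"        # prints: watchdog
# The old pattern "$db\.\([^\w]*\)" treated \w as two literal characters inside
# the bracket expression, so a table name like "watchdog" came back empty.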
diff --git a/handlers/rdiff.helper.in b/handlers/rdiff.helper.in
index 1597305..e2ddc46 100644
--- a/handlers/rdiff.helper.in
+++ b/handlers/rdiff.helper.in
@@ -124,7 +124,7 @@ do_rdiff_dest() {
REPLY=
while [ -z "$REPLY" -o -z "$rdiff_directory" -o -z "$rdiff_host" -o -z "$rdiff_user" ]
do
- formBegin "$rdiff_title - destination: last three items are required"
+ formBegin "$rdiff_title - destination"
formItem "keep" "$rdiff_keep"
formItem "dest_directory" "$rdiff_directory"
formItem "dest_host" "$rdiff_host"
@@ -282,12 +282,33 @@ do_rdiff_con() {
setDefault finish
}
+do_rdiff_adv() {
+ booleanBox "$rdiff_title" "Should backupninja write program output as Info messages rather than Debug messages?" no
+ if [ $? = 0 ]; then
+ rdiff_output_as_info=yes
+ else
+ rdiff_output_as_info=no
+ fi
+}
+
do_rdiff_finish() {
get_next_filename $configdirectory/90.rdiff
cat > $next_filename <<EOF
# options = --force
# when = everyday at 02
+## should backupninja write program output as Info messages rather than Debug
+## messages? (default: no)
+## Usually rdiff-backup output (for increment expiration and backup) is written
+## to output as Debug messages; this option causes backupninja to use Info-level
+## messages instead. Since backup reports include Info messages, this option is
+## useful to receive output like rdiff-backup session statistics in reports. In
+## addition, since rdiff-backup has a habit of using a zero exit code when
+## non-fatal errors are encountered (causing backupninja to conclude the backup
+## was entirely successful), this option is useful for inspecting non-fatal
+## filesystem and permission errors from rdiff-backup.
+output_as_info = $rdiff_output_as_info
+
[source]
type = local
keep = $rdiff_keep
@@ -378,6 +399,7 @@ rdiff_main_menu() {
src "$srcitem" \
dest "$destitem" \
conn "$conitem" \
+ adv "$advitem" \
finish "finish and create config file"
[ $? = 0 ] || return
result="$REPLY"
@@ -411,6 +433,7 @@ rdiff_wizard() {
rdiff_type=remote
rdiff_user=
rdiff_host=
+ rdiff_output_as_info="no"
# Global variables whose '*' shall not be expanded
set -o noglob
diff --git a/handlers/rdiff.in b/handlers/rdiff.in
index c3c8d1d..471a3d7 100644
--- a/handlers/rdiff.in
+++ b/handlers/rdiff.in
@@ -85,6 +85,7 @@ getconf testconnect yes
getconf nicelevel 0
getconf bwlimit
getconf ignore_version no
+getconf output_as_info no
setsection source
getconf type; sourcetype=$type
@@ -182,7 +183,11 @@ if [ "$keep" != yes ]; then
if [ $test = 0 ]; then
output="`su -c "$removestr" 2>&1`"
if [ $? = 0 ]; then
- debug $output
+ if [ "$output_as_info" == "yes" ]; then
+ info $output
+ else
+ debug $output
+ fi
info "Removing backups older than $keep days succeeded."
else
warning $output
@@ -268,7 +273,11 @@ debug "$execstr"
if [ $test = 0 ]; then
output=`nice -n $nicelevel su -c "$execstr" 2>&1`
if [ $? = 0 ]; then
- debug $output
+ if [ "$output_as_info" == "yes" ]; then
+ info $output
+ else
+ debug $output
+ fi
info "Successfully finished backing up source $label"
else
error $output
diff --git a/handlers/rsync.in b/handlers/rsync.in
index 81bd5b4..d0c211c 100644
--- a/handlers/rsync.in
+++ b/handlers/rsync.in
@@ -39,7 +39,6 @@
# keepdaily = for long storage format, specify the number of daily backup increments
# keepweekly = for long storage format, specify the number of weekly backup increments
# keepmonthly = for long storage format, specify the number of monthly backup increments
-# lockfile = lockfile to be kept during backup execution
# nicelevel = rsync command nice level
# enable_mv_timestamp_bug = set to "yes" if your system isnt handling timestamps correctly
# tmp = temp folder
@@ -129,7 +128,6 @@ function eval_config {
getconf keepdaily 5
getconf keepweekly 3
getconf keepmonthly 1
- getconf lockfile
getconf nicelevel 0
getconf enable_mv_timestamp_bug no
getconf tmp /tmp
@@ -277,9 +275,15 @@ function eval_config {
mv=move_files
fi
- for path in $exclude; do
- excludes="$excludes --exclude=$path"
+ set -o noglob
+ SAVEIFS=$IFS
+ IFS=$(echo -en "\n\b")
+ for i in $exclude; do
+ str="${i//__star__/*}"
+ excludes="${excludes} --exclude='$str'"
done
+ IFS=$SAVEIFS
+ set +o noglob
# Make sure we'll run bash at the destination
ssh_cmd="$ssh_cmd /bin/bash"
@@ -394,8 +398,8 @@ function rotate_long {
local metadata
if [ ! -d "$backuproot" ]; then
- echo "Debug: skipping rotate of $backuproot as it doesn't exist."
- exit
+ warning "Skipping rotate of $backuproot as it doesn't exist."
+ return
fi
for rottype in daily weekly monthly; do
@@ -408,12 +412,12 @@ function rotate_long {
echo "Debug: $dir.1 does not exist, skipping."
continue 1
elif [ ! -f $metadata.1/created ] && [ ! -f $metadata.1/rotated ]; then
- echo "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
+         warning "Metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
# Rotate the current list of backups, if we can.
- oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1`
+ oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
[ "$oldest" == "" ] && oldest=0
for (( i=$oldest; i > 0; i-- )); do
if [ -d $dir.$i ]; then
@@ -424,11 +428,16 @@ function rotate_long {
else
created=0
fi
+ # Validate created date
+ if [ -z "$created" ] || echo $created | grep -v -q -e '^[0-9]*$'; then
+ warning "Invalid metadata $created. Skipping rotation."
+ break
+ fi
cutoff_time=$(( now - (seconds*(i-1)) ))
if [ ! $created -gt $cutoff_time ]; then
next=$(( i + 1 ))
if [ ! -d $dir.$next ]; then
- echo "Debug: $rottype.$i --> $rottype.$next"
+ debug "$rottype.$i --> $rottype.$next"
$nice mv $dir.$i $dir.$next
mkdir -p $metadata.$next
date +%c%n%s > $metadata.$next/rotated
@@ -436,10 +445,10 @@ function rotate_long {
$nice mv $metadata.$i/created $metadata.$next
fi
else
- echo "Debug: skipping rotation of $dir.$i because $dir.$next already exists."
+ debug "skipping rotation of $dir.$i because $dir.$next already exists."
fi
else
- echo "Debug: skipping rotation of $dir.$i because it was created" $(( (now-created)/86400)) "days ago ("$(( (now-cutoff_time)/86400))" needed)."
+ debug "skipping rotation of $dir.$i because it was created" $(( (now-created)/86400)) "days ago ("$(( (now-cutoff_time)/86400))" needed)."
fi
fi
done
@@ -447,9 +456,11 @@ function rotate_long {
max=$((keepdaily+1))
if [ $keepweekly -gt 0 -a -d $backuproot/daily.$max -a ! -d $backuproot/weekly.1 ]; then
- echo "Debug: daily.$max --> weekly.1"
+ debug "daily.$max --> weekly.1"
$nice mv $backuproot/daily.$max $backuproot/weekly.1
mkdir -p $backuproot/metadata/weekly.1
+ # Update increment folder date and setup metadata
+ $touch $backuproot/weekly.1
date +%c%n%s > $backuproot/metadata/weekly.1/rotated
#if [ -f $backuproot/metadata/daily.$max/created ]; then
# $nice mv $backuproot/metadata/daily.$max/created $backuproot/metadata/weekly.1/
@@ -458,9 +469,11 @@ function rotate_long {
max=$((keepweekly+1))
if [ $keepmonthly -gt 0 -a -d $backuproot/weekly.$max -a ! -d $backuproot/monthly.1 ]; then
- echo "Debug: weekly.$max --> monthly.1"
+ debug "weekly.$max --> monthly.1"
$nice mv $backuproot/weekly.$max $backuproot/monthly.1
mkdir -p $backuproot/metadata/monthly.1
+ # Update increment folder date and setup metadata
+ $touch $backuproot/monthly.1
date +%c%n%s > $backuproot/metadata/monthly.1/rotated
#if [ -f $backuproot/metadata/weekly.$max/created ]; then
# $nice mv $backuproot/metadata/weekly.$max/created $backuproot/metadata/weekly.1/
@@ -470,16 +483,16 @@ function rotate_long {
for rottype in daily weekly monthly; do
max=$((keep${rottype}+1))
dir="$backuproot/$rottype"
- oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1`
+ oldest=`find $backuproot -maxdepth 1 -type d -name $rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1`
[ "$oldest" == "" ] && oldest=0
# if we've rotated the last backup off the stack, remove it.
for (( i=$oldest; i >= $max; i-- )); do
if [ -d $dir.$i ]; then
if [ -d $backuproot/rotate.tmp ]; then
- echo "Debug: removing rotate.tmp"
+ debug "removing rotate.tmp"
$nice rm -rf $backuproot/rotate.tmp
fi
- echo "Debug: moving $rottype.$i to rotate.tmp"
+ debug "moving $rottype.$i to rotate.tmp"
$nice mv $dir.$i $backuproot/rotate.tmp
fi
done
@@ -512,7 +525,7 @@ function rotate_long_remote {
now=\`date +%s\`
if [ ! -d "$backuproot" ]; then
- echo "Debug: skipping rotate of $backuproot as it doesn't exist."
+ echo "Fatal: skipping rotate of $backuproot as it doesn't exist."
exit
fi
@@ -531,7 +544,7 @@ function rotate_long_remote {
fi
# Rotate the current list of backups, if we can.
- oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
[ "\$oldest" == "" ] && oldest=0
for (( i=\$oldest; i > 0; i-- )); do
if [ -d \$dir.\$i ]; then
@@ -542,6 +555,11 @@ function rotate_long_remote {
else
created=0
fi
+ # Validate created date
+ if [ -z "\$created" ] || echo \$created | grep -v -q -e '^[0-9]*$'; then
+ echo "Warning: Invalid metadata \$created. Skipping rotation."
+ break
+ fi
cutoff_time=\$(( now - (seconds*(i-1)) ))
if [ ! \$created -gt \$cutoff_time ]; then
next=\$(( i + 1 ))
@@ -568,6 +586,8 @@ function rotate_long_remote {
echo "Debug: daily.\$max --> weekly.1"
$nice mv $backuproot/daily.\$max $backuproot/weekly.1
mkdir -p $backuproot/metadata/weekly.1
+ # Update increment folder date and setup metadata
+ $touch $backuproot/weekly.1
date +%c%n%s > $backuproot/metadata/weekly.1/rotated
#if [ -f $backuproot/metadata/daily.\$max/created ]; then
# $nice mv $backuproot/metadata/daily.\$max/created $backuproot/metadata/weekly.1/
@@ -579,6 +599,8 @@ function rotate_long_remote {
echo "Debug: weekly.\$max --> monthly.1"
$nice mv $backuproot/weekly.\$max $backuproot/monthly.1
mkdir -p $backuproot/metadata/monthly.1
+ # Update increment folder date and setup metadata
+ $touch $backuproot/monthly.1
date +%c%n%s > $backuproot/metadata/monthly.1/rotated
#if [ -f $backuproot/metadata/weekly.\$max/created ]; then
# $nice mv $backuproot/metadata/weekly.\$max/created $backuproot/metadata/weekly.1/
@@ -588,7 +610,7 @@ function rotate_long_remote {
for rottype in daily weekly monthly; do
max=\$((keep\${rottype}+1))
dir="$backuproot/\$rottype"
- oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ -e 's/^.*\.//' | sort -n | tail -1\`
[ "\$oldest" == "" ] && oldest=0
# if we've rotated the last backup off the stack, remove it.
for (( i=\$oldest; i >= \$max; i-- )); do
@@ -829,7 +851,11 @@ function update_metadata {
if [ "$dest" == "local" ]; then
metadata="`dirname $dest_path`/metadata/`basename $dest_path`"
mkdir -p $metadata
- date +%c%n%s > $metadata/created
+ # Use the backup start time and not the time the backup was
+ # finished, otherwise daily rotations might not take place.
+ # If we used backup end time, in the next handler run
+ # we might not have $now - $created >= 24:00
+ echo "$starttime" > $metadata/created
$touch $backupdir/$SECTION/$suffix
else
folder="`echo $dest_path | cut -d : -f 2`"
@@ -839,7 +865,11 @@ function update_metadata {
$ssh_cmd <<EOF
##### BEGIN REMOTE SCRIPT #####
mkdir -p $metadata
- date +%c%n%s > $metadata/created
+ # Use the backup start time and not the time the backup was
+ # finished, otherwise daily rotations might not take place.
+ # If we used backup end time, in the next handler run
+ # we might not have $now - $created >= 24:00
+ echo "$starttime" > $metadata/created
##### END REMOTE SCRIPT #######
EOF
) | (while read a; do passthru $a; done)
@@ -870,45 +900,6 @@ function test_connect {
}
-function set_lockfile {
-
- if [ ! -z "$lockfile" ]; then
- mkdir -p `dirname $lockfile`
- if ( set -o noclobber; echo "$$" > "$lockfile" ) &> /dev/null; then
- trap 'unset_lockfile' INT TERM EXIT
- else
- fatal "Could not create lockfile $lockfile, exiting"
- fi
- fi
-
-}
-
-function unset_lockfile {
-
- if [ ! -z "$lockfile" ]; then
- $rm -f $lockfile || warning "Could not remove lockfile $lockfile"
- fi
-
-}
-
-function check_lockfile {
-
- local pid process
-
- if [ ! -z "$lockfile" ] && [ -f "$lockfile" ]; then
- pid="`cat $lockfile`"
- process="`ps --no-headers -o comm $pid`"
- if [ "$?" == "0" ] && [ "`ps --no-headers -o comm $$`" == "$process" ]; then
- info "Another backup is running for $lockfile, skipping run"
- exit
- else
- info "Found old lockfile $lockfile, removing it"
- unset_lockfile
- fi
- fi
-
-}
-
function set_filelist {
filelist_flag=""
@@ -1095,17 +1086,58 @@ function end_mux {
}
+function set_pipefail {
+
+ # Save initial pipefail status for later restoration
+ if echo "$SHELLOPTS" | grep -q ":pipefail"; then
+ pipefail="-o"
+ else
+ pipefail="+o"
+ fi
+
+ # Ensure that a non-zero rsync exit status is caught by our handler
+ set -o pipefail
+
+}
+
+function restore_pipefail {
+
+ if [ ! -z "$pipefail" ]; then
+ set $pipefail pipefail
+ fi
+
+}
+
+function check_rsync_exit_status {
+
+ if [ -z "$1" ]; then
+ return
+ fi
+
+ case $1 in
+ 0)
+ return
+ ;;
+ 1|2|3|4|5|6|10|11|12|13|14|21)
+ fatal "Rsync error $1 when trying to transfer $SECTION"
+ ;;
+ *)
+ warning "Rsync error $1 when trying to transfer $SECTION"
+ ;;
+ esac
+
+}
+
# the backup procedure
eval_config
-check_lockfile
-set_lockfile
set_rsync_options
start_mux
stop_services
mount_rw
-echo "Starting backup at `date`" >> $log
+starttime="`date +%c%n%s`"
+echo "Starting backup at `echo "$starttime" | head -n 1`" >> $log
for SECTION in $include; do
@@ -1116,13 +1148,13 @@ for SECTION in $include; do
set_dest
info "Syncing $SECTION on $dest_path..."
- debug $nice $rsync "${rsync_options[@]}" $filelist_flag $excludes $batch_option $orig $dest_path
- $nice $rsync "${rsync_options[@]}" $filelist_flag $excludes $batch_option $orig $dest_path | tee -a $log
-
- if [ "$?" != "0" ]; then
- fatal "Rsync error when trying to transfer $SECTION"
- fi
+ command="$rsync ${rsync_options[@]} --delete-excluded $filelist_flag $excludes $batch_option $orig $dest_path"
+ debug $nice su -c "$command"
+ set_pipefail
+ $nice su -c "$command" | tee -a $log
+ check_rsync_exit_status $?
+ restore_pipefail
update_metadata
done
@@ -1130,7 +1162,6 @@ done
mount_ro
run_fsck
start_services
-unset_lockfile
end_mux
echo "Finnishing backup at `date`" >> $log
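
# Why set_pipefail is wrapped around the "rsync | tee -a $log" pipeline above:
# without it, the pipeline's exit status is tee's, and a failing rsync would be
# invisible to check_rsync_exit_status. Self-contained illustration:
set +o pipefail
false | tee -a /dev/null ; echo "status without pipefail: $?"   # prints 0
set -o pipefail
false | tee -a /dev/null ; echo "status with pipefail: $?"      # prints 1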
diff --git a/handlers/sys.helper.in b/handlers/sys.helper.in
index 8a2fb07..f728f51 100644
--- a/handlers/sys.helper.in
+++ b/handlers/sys.helper.in
@@ -20,6 +20,8 @@ sys_wizard() {
hardware="hardware = no"
luksheaders="luksheaders = no"
lvm="lvm = no"
+ mbr="mbr = no"
+ bios="bios = no"
for opt in $result; do
case $opt in
'"packages"') packages="packages = yes";;
@@ -28,6 +30,8 @@ sys_wizard() {
'"hardware"') hardware="hardware = yes";;
'"luksheaders"') luksheaders="luksheaders = yes";;
'"lvm"') lvm="lvm = yes";;
+ '"mbr"') mbr="mbr = yes";;
+ '"bios"') bios="bios = yes";;
esac
done
get_next_filename $configdirectory/10.sys
@@ -38,6 +42,8 @@ $sfdisk
$hardware
$luksheaders
$lvm
+$mbr
+$bios
# packagesfile = /var/backups/dpkg-selections.txt
# selectionsfile = /var/backups/debconfsel.txt
@@ -51,4 +57,3 @@ $lvm
EOF
chmod 600 $next_filename
}
-
diff --git a/handlers/sys.in b/handlers/sys.in
index 74133a3..605e583 100644
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -25,14 +25,23 @@
# (4) hardware information.
# write to a text file the important things which hwinfo can gleen.
#
-# (5) the Luks header of every Luks block device, if option luksheaders
+# (5) the LUKS header of every LUKS block device, if option luksheaders
# is enabled.
-# in case you (have to) scramble such a Luks header (for some time),
+# in case you (have to) scramble such a LUKS header (for some time),
# and restore it later by running "dd if=luksheader.sda2.bin of=/dev/sda2"
# (MAKE SURE YOU PASS THE CORRECT DEVICE AS of= !!!)
#
# (6) LVM metadata for every detected volume group, if "lvm = yes"
#
+# (7) a copy of each device's MBR, if "mbr = yes". The master boot record
+# (MBR) is the 512-byte boot sector at the very start of a partitioned
+# hard disk. To restore the MBR one could do something like:
+# dd if=mbr.sda.bin of=/dev/sda
+# (MAKE SURE YOU PASS THE CORRECT DEVICE AS of= !!!)
+# WARNING: Restoring the MBR with a mismatching partition table will
+# make your data unreadable and nearly impossible to recover
+#
+# (8) a copy of the BIOS, if "bios = yes" and flashrom is installed
if [ -f /etc/debian_version ]
then
@@ -44,6 +53,11 @@ then
os=redhat
debug "Redhat detected"
osversion="/etc/redhat-release"
+elif [ -f /etc/SuSE-release ]
+then
+ os=suse
+ debug "SuSE detected"
+ osversion="/etc/SuSE-release"
else
warning "Unknown OS detected!"
fi
@@ -57,14 +71,14 @@ if [ ! -d $parentdir ]; then
mkdir -p $parentdir
fi
-if [ $os = "debian" ]
+if [ "$os" = "debian" ]
then
getconf packagesfile $parentdir/dpkg-selections.txt
getconf packagemgr `which dpkg`
getconf packagemgroptions ' --get-selections *'
getconf selectionsfile $parentdir/debconfsel.txt
getconf debconfgetselections `which debconf-get-selections`
-elif [ $os = "redhat" ]
+elif [ "$os" = "redhat" ]
then
getconf packagesfile $parentdir/rpmpackages.txt
getconf packagemgr `which rpm`
@@ -72,6 +86,11 @@ then
getconf SYSREPORT `which sysreport`
getconf sysreport_options ' -norpm '
+elif [ "$os" = "suse" ]
+then
+ getconf packagesfile $parentdir/rpmpackages.txt
+ getconf packagemgr `which rpm`
+ getconf packagemgroptions ' -qa '
else
getconf packagesfile $parentdir/unknownOS.txt
fi
@@ -100,6 +119,12 @@ getconf VGS `which vgs`
getconf VGCFGBACKUP `which vgcfgbackup`
getconf lvm no
+getconf mbr no
+getconf mbrfile $parentdir/mbr.__star__.bin
+
+getconf FLASHROM `which flashrom`
+getconf bios no
+
getconf vsnames all
# If vservers are configured, check that the ones listed in $vsnames are running.
@@ -119,11 +144,11 @@ fi
if [ "$luksheaders" == "yes" ]; then
if [ ! -x "$DD" ]; then
- warning "can't find dd, skipping backup of Luks headers."
+ warning "can't find dd, skipping backup of LUKS headers."
luksheaders="no"
fi
if [ ! -x "$CRYPTSETUP" ]; then
- warning "can't find cryptsetup, skipping backup of Luks headers."
+ warning "can't find cryptsetup, skipping backup of LUKS headers."
luksheaders="no"
fi
fi
@@ -139,6 +164,20 @@ if [ "$lvm" == "yes" ]; then
fi
fi
+if [ "$mbr" == "yes" ]; then
+ if [ ! -x "$DD" ]; then
+ warning "can't find dd, skipping backup of MBR."
+ mbr="no"
+ fi
+fi
+
+if [ "$bios" == "yes" ]; then
+ if [ ! -x "$FLASHROM" ]; then
+ warning "can't find flashrom, skipping backup of BIOS."
+      bios="no"
+ fi
+fi
+
## PACKAGES ##############################
#
@@ -348,13 +387,15 @@ echo "Getting information about the kernel."
echo
STATUS="Getting kernel version:"
catifexec "/bin/uname" "-a"
-STATUS="Checking module information:"
-catifexec "/sbin/lsmod"
-for x in $(/sbin/lsmod | /usr/bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
-) ; do
- STATUS="Checking module information $x:"
- catifexec "/sbin/modinfo" "$x"
-done
+if [ "$hardware" == "yes" ]; then
+ STATUS="Checking module information:"
+ catifexec "/sbin/lsmod"
+ for x in $(/sbin/lsmod | /usr/bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
+ ) ; do
+ STATUS="Checking module information $x:"
+ catifexec "/sbin/modinfo" "$x"
+ done
+fi
STATUS="Gathering information about your filesystems:"
catiffile "/proc/filesystems"
@@ -432,9 +473,11 @@ catiffile "/proc/rtc"
STATUS="Gathering information about your ide drivers:"
catiffile "/proc/ide"
-STATUS="Gathering information about your bus:"
-catifexec "/usr/bin/lspci"
-catiffile "/proc/bus"
+if [ "$hardware" == "yes" ]; then
+ STATUS="Gathering information about your bus:"
+ catifexec "/usr/bin/lspci"
+ catiffile "/proc/bus"
+fi
echo
echo "Getting disk and filesystem information."
@@ -591,8 +634,10 @@ fi
if [ "$luksheaders" == "yes" ]; then
devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
[ -n "$devices" ] || warning "No block device found"
+ partitions=`LC_ALL=C $SFDISK -l 2>/dev/null |grep "^/dev" | @AWK@ '{print $1}'`
+ [ -n "$partitions" ] || warning "No partitions found"
targetdevices=""
- for dev in $devices; do
+ for dev in $devices $partitions; do
[ -b $dev ] || continue
debug "$CRYPTSETUP isLuks $dev"
$CRYPTSETUP isLuks $dev
@@ -603,26 +648,45 @@ if [ "$luksheaders" == "yes" ]; then
label=${label//\//-}
outputfile=${luksheadersfile//__star__/$label}
# the following sizes are expressed in terms of 512-byte sectors
- debug "Let us find out the Luks header size for $dev"
+ debug "Let us find out the LUKS header size for $dev"
debug "$CRYPTSETUP luksDump \"$dev\" | grep '^Payload offset:' | @AWK@ '{print $3}'"
headersize=`$CRYPTSETUP luksDump "$dev" | grep '^Payload offset:' | @AWK@ '{print $3}'`
if [ $? -ne 0 ]; then
- warning "Could not compute the size of Luks header, skipping device $dev"
+ warning "Could not compute the size of LUKS header, skipping $dev"
continue
- elif [ -z "$headersize" -o -n "`echo \"$headersize\" | sed 's/[0-9]*//g'`" ]; then
- warning "The computed size of Luks header is not an integer, skipping device $dev"
+ elif [ -z "$headersize" -o -n "`echo \"$headersize\" | @SED@ 's/[0-9]*//g'`" ]; then
+ warning "The computed size of LUKS header is not an integer, skipping $dev"
continue
fi
- debug "Let us backup the Luks header of device $dev"
+ debug "Let us backup the LUKS header of $dev"
debug "$DD if=\"${dev}\" of=\"${outputfile}\" bs=512 count=\"${headersize}\""
output=`$DD if="${dev}" of="${outputfile}" bs=512 count="${headersize}" 2>&1`
exit_code=$?
if [ $exit_code -eq 0 ]; then
debug $output
- info "The Luks header of $dev was saved to $outputfile."
+ info "The LUKS header of $dev was saved to $outputfile."
else
debug $output
- fatal "The Luks header of $dev could not be saved."
+ fatal "The LUKS header of $dev could not be saved."
+ fi
+ done
+fi
+
+if [ "$mbr" == "yes" ]; then
+ devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
+ if [ "$devices" == "" ]; then
+      warning "No hard disks found"
+ fi
+ for dev in $devices; do
+      debug "Will try to back up the MBR of device $dev"
+ [ -b $dev ] || continue
+ label=${dev#/dev/}
+ label=${label//\//-}
+ outputfile=${mbrfile//__star__/$label}
+ debug "$DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null"
+ $DD if=$dev of=$outputfile bs=512 count=1 2>/dev/null
+ if [ $? -ne 0 ]; then
+ warning "The MBR for $dev could not be saved."
fi
done
fi
@@ -686,3 +750,14 @@ if [ "$lvm" == "yes" ]; then
;;
esac
fi
+
+## BIOS ####################################
+
+if [ "$bios" == "yes" ]; then
+ debug "Trying to backup BIOS"
+ debug "$FLASHROM -r ${parentdir}/bios --programmer internal >/dev/null 2>&1"
+ $FLASHROM -r ${parentdir}/bios --programmer internal >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ warning "The BIOS could not be saved."
+ fi
+fi
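
# Hypothetical round trip for one disk, matching the mbr option above
# (triple-check the of= device before restoring anything):
dd if=/dev/sda of=/var/backups/sys/mbr.sda.bin bs=512 count=1   # boot code + partition table
# restore only the 446 bytes of boot code, leaving the current partition table alone:
dd if=/var/backups/sys/mbr.sda.bin of=/dev/sda bs=446 count=1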
diff --git a/handlers/tar.helper.in b/handlers/tar.helper.in
index 4a483be..91fec34 100644
--- a/handlers/tar.helper.in
+++ b/handlers/tar.helper.in
@@ -25,7 +25,8 @@ tar_wizard() {
"none" "do not filter trough" off \
"compress" "filter trough compress" off \
"gzip" "filter trough gzip" off \
- "bzip" "filter trough bzip" on
+      "bzip" "filter through bzip" on \
+      "xz" "filter through xz" off
[ $? = 1 ] && return;
result="$REPLY"
tar_compress="compress = $REPLY "
diff --git a/handlers/tar.in b/handlers/tar.in
index b4f8c58..7497306 100644
--- a/handlers/tar.in
+++ b/handlers/tar.in
@@ -48,6 +48,10 @@ case $compress in
compress_option="-j"
EXTENSION="tar.bz2"
;;
+ "xz")
+ compress_option="-J"
+ EXTENSION="tar.xz"
+ ;;
"none")
compress_option=""
;;
@@ -68,11 +72,13 @@ debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes
+if [ ! $test ]; then
$TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
$includes \
> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.list \
2> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.err
+fi
[ $? -ne 0 ] && fatal "Tar backup failed"
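
# With compress = xz, the tar invocation above expands to roughly the following
# for a hypothetical backupname "home" (backupdir and DATEFORMAT are made up):
stamp=$(date '+%Y.%m.%d-%H%M')
tar -c -p -v -J -f "/var/backups/home-$stamp.tar.xz" /home \
   > "/var/backups/home-$stamp.list" \
   2> "/var/backups/home-$stamp.err"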