author    Micah Anderson <micah@riseup.net>  2010-01-17 23:54:09 +1300
committer Micah Anderson <micah@riseup.net>  2010-01-17 23:54:09 +1300
commit    fe3620c6529cc449cd152ed58667e39d4a80257e (patch)
tree      ec83be42237855979a31f38cdce51f45cdbd7312 /handlers
parent    18f28c2cb00286cc0ff790ef6b6914f3f22ec4ac (diff)
parent    46eccf2fd0c894790405b2e3aae8dfb99f5a5d98 (diff)
download  backupninja-fe3620c6529cc449cd152ed58667e39d4a80257e.tar.gz
          backupninja-fe3620c6529cc449cd152ed58667e39d4a80257e.tar.bz2
Merge branch 'master' into debian
Conflicts:
    AUTHORS
    ChangeLog
    handlers/maildir.in
    handlers/mysql.in
    handlers/rsync.in
    handlers/sys.helper.in
    handlers/sys.in
    src/backupninja.in
Diffstat (limited to 'handlers')
-rw-r--r--  handlers/dup.helper.in     157
-rw-r--r--  handlers/dup.in             35
-rw-r--r--  handlers/ldap.helper.in     51
-rw-r--r--  handlers/ldap.in            33
-rw-r--r--  handlers/maildir.in        447
-rw-r--r--  handlers/makecd.helper.in  115
-rw-r--r--  handlers/makecd.in          61
-rw-r--r--  handlers/mysql.helper.in   103
-rw-r--r--  handlers/mysql.in          364
-rw-r--r--  handlers/pgsql.helper.in    25
-rw-r--r--  handlers/pgsql.in          117
-rw-r--r--  handlers/rdiff.helper.in    61
-rw-r--r--  handlers/rdiff.in          216
-rw-r--r--  handlers/rsync.in           28
-rw-r--r--  handlers/sh.in               3
-rw-r--r--  handlers/svn.in             65
-rw-r--r--  handlers/sys.helper.in      29
-rwxr-xr-x  handlers/sys.in            334
-rw-r--r--  handlers/tar.helper.in      21
-rw-r--r--  handlers/tar.in             23
-rw-r--r--  handlers/trac.in            56
-rw-r--r--  handlers/wget              114
22 files changed, 1354 insertions, 1104 deletions
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index c1fbdd5..ae48e4c 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS dup:incremental_encrypted_remote_filesystem_backup"
@@ -13,11 +14,11 @@ do_dup_host_includes() {
[ -z "$dup_includes" ] && dup_includes="$dup_default_includes"
for i in $dup_includes; do
formItem include "$i"
- done
- formItem include ""
- formItem include ""
- formItem include ""
- formDisplay
+ done
+ formItem include ""
+ formItem include ""
+ formItem include ""
+ formDisplay
[ $? = 0 ] || return 1
dup_includes="$REPLY"
done
@@ -37,10 +38,10 @@ do_dup_vserver() {
[ -z "$dup_vsincludes" ] && dup_vsincludes="$dup_default_includes"
for i in $dup_vsincludes; do
formItem include "$i"
- done
- formItem include ""
- formItem include ""
- formItem include ""
+ done
+ formItem include ""
+ formItem include ""
+ formItem include ""
formDisplay
[ $? = 0 ] || return 1
dup_vsincludes="$REPLY"
@@ -69,26 +70,26 @@ do_dup_src() {
[ $? = 0 ] || return 1
case $host_or_vservers in
'host')
- do_dup_host_includes
- [ $? = 0 ] || return 1
- ;;
+ do_dup_host_includes
+ [ $? = 0 ] || return 1
+ ;;
'vservers')
- do_dup_vserver
- [ $? = 0 ] || return 1
- ;;
+ do_dup_vserver
+ [ $? = 0 ] || return 1
+ ;;
'both')
- do_dup_host_includes
- [ $? = 0 ] || return 1
- do_dup_vserver
- [ $? = 0 ] || return 1
- ;;
+ do_dup_host_includes
+ [ $? = 0 ] || return 1
+ do_dup_vserver
+ [ $? = 0 ] || return 1
+ ;;
*)
- return 1
- ;;
+ return 1
+ ;;
esac
do_dup_excludes
[ $? = 0 ] || return 1
-
+
_src_done="(DONE)"
setDefault dest
}
@@ -102,13 +103,13 @@ do_dup_dest() {
REPLY=
while [ -z "$REPLY" -o -z "$dup_destdir" -o -z "$dup_desthost" -o -z "$dup_destuser" ]; do
formBegin "$dup_title - destination: first three items are compulsory"
- formItem "desthost" "$dup_desthost"
- formItem "destuser" "$dup_destuser"
- formItem "destdir" "$dup_destdir"
- formItem "keep" "$dup_keep"
+ formItem "desthost" "$dup_desthost"
+ formItem "destuser" "$dup_destuser"
+ formItem "destdir" "$dup_destdir"
+ formItem "keep" "$dup_keep"
formItem "incremental" "$dup_incremental"
- formItem "bandwidthlimit" "$dup_bandwidth"
- formItem "sshoptions" "$dup_sshoptions"
+ formItem "bandwidthlimit" "$dup_bandwidth"
+ formItem "sshoptions" "$dup_sshoptions"
formDisplay
[ $? = 0 ] || return 1
@@ -117,7 +118,7 @@ do_dup_dest() {
IFS=$':'
thereply=($replyconverted)
IFS=$' \t\n'
-
+
dup_desthost=${thereply[0]}
dup_destuser=${thereply[1]}
dup_destdir=${thereply[2]}
@@ -165,9 +166,9 @@ do_dup_gpg_signkey() {
# signkey ?
REPLY=
while [ -z "$REPLY" -o -z "$dup_gpg_signkey" ]; do
- inputBox "$dup_title - GnuPG" "Enter the ID of the private GnuPG key to be used to sign the backups:" "$dup_gpg_signkey"
- [ $? = 0 ] || return 1
- dup_gpg_signkey="$REPLY"
+ inputBox "$dup_title - GnuPG" "Enter the ID of the private GnuPG key to be used to sign the backups:" "$dup_gpg_signkey"
+ [ $? = 0 ] || return 1
+ dup_gpg_signkey="$REPLY"
done
fi
}
@@ -183,7 +184,7 @@ do_dup_gpg_passphrase() {
}
do_dup_gpg() {
-
+
# symmetric or public key encryption ?
booleanBox "$dup_title - GnuPG" "Use public key encryption? Otherwise, symmetric encryption will be used, and data signing will be impossible." "$dup_gpg_asymmetric_encryption"
if [ $? = 0 ]; then
@@ -197,7 +198,7 @@ do_dup_gpg() {
do_dup_gpg_encryptkey ; [ $? = 0 ] || return 1
do_dup_gpg_sign ; [ $? = 0 ] || return 1
if [ "$dup_gpg_sign" == yes ]; then
- do_dup_gpg_signkey ; [ $? = 0 ] || return 1
+ do_dup_gpg_signkey ; [ $? = 0 ] || return 1
fi
else
dup_gpg_sign=no
@@ -334,7 +335,7 @@ EOF
if [ "$host_or_vservers" == host -o "$host_or_vservers" == both ]; then
set -o noglob
for i in $dup_includes; do
- echo "include = $i" >> $next_filename
+ echo "include = $i" >> $next_filename
done
set +o noglob
fi
@@ -402,6 +403,12 @@ keep = $dup_keep
# bandwithlimit. For details, see duplicity manpage, section "URL FORMAT".
#desturl = file:///usr/local/backup
#desturl = rsync://user@other.host//var/backup/bla
+#desturl = s3+http://your_bucket
+
+# Amazon Web Services Access Key ID and Secret Access Key, needed for backups
+# to S3 buckets.
+#awsaccesskeyid = YOUR_AWS_ACCESS_KEY_ID
+#awssecretaccesskey = YOUR_AWS_SECRET_KEY
# bandwith limit, in kbit/s ; default is 0, i.e. no limit
#bandwidthlimit = 128
@@ -432,49 +439,49 @@ EOF
dup_main_menu() {
- while true; do
- srcitem="choose files to include & exclude $_src_done"
- destitem="configure backup destination $_dest_done"
- gpgitem="configure GnuPG encryption/signing $_gpg_done"
- conitem="set up ssh keys and test remote connection $_con_done"
- advitem="edit advanced settings $_adv_done"
- # TODO: add the following to the menu when do_dup_conn is written
- # conn "$conitem" \
- menuBox "$dup_title" "choose a step:" \
- src "$srcitem" \
- dest "$destitem" \
- gpg "$gpgitem" \
- adv "$advitem" \
- finish "finish and create config file"
- [ $? = 0 ] || return 1
- result="$REPLY"
-
- case "$result" in
- "src") do_dup_src;;
- "dest") do_dup_dest;;
- "gpg") do_dup_gpg;;
- # TODO: enable the following when do_dup_conn is written
- # "conn") do_dup_conn;;
- "adv") do_dup_adv;;
- "finish")
- if [[ "$_dest_done$_gpg_done$_src_done" != "(DONE)(DONE)(DONE)" ]]; then
- # TODO: replace the previous test by the following when do_dup_conn is written
- # if [[ "$_con_done$_dest_done$_gpg_done$_src_done" != "(DONE)(DONE)(DONE)(DONE)" ]]; then
- msgBox "$dup_title" "You cannot create the configuration file until the four first steps are completed."
- else
- do_dup_finish
- break
- fi
- ;;
- esac
-
- done
+ while true; do
+ srcitem="choose files to include & exclude $_src_done"
+ destitem="configure backup destination $_dest_done"
+ gpgitem="configure GnuPG encryption/signing $_gpg_done"
+ conitem="set up ssh keys and test remote connection $_con_done"
+ advitem="edit advanced settings $_adv_done"
+ # TODO: add the following to the menu when do_dup_conn is written
+ # conn "$conitem" \
+ menuBox "$dup_title" "choose a step:" \
+ src "$srcitem" \
+ dest "$destitem" \
+ gpg "$gpgitem" \
+ adv "$advitem" \
+ finish "finish and create config file"
+ [ $? = 0 ] || return 1
+ result="$REPLY"
+
+ case "$result" in
+ "src") do_dup_src;;
+ "dest") do_dup_dest;;
+ "gpg") do_dup_gpg;;
+ # TODO: enable the following when do_dup_conn is written
+ # "conn") do_dup_conn;;
+ "adv") do_dup_adv;;
+ "finish")
+ if [[ "$_dest_done$_gpg_done$_src_done" != "(DONE)(DONE)(DONE)" ]]; then
+ # TODO: replace the previous test by the following when do_dup_conn is written
+ # if [[ "$_con_done$_dest_done$_gpg_done$_src_done" != "(DONE)(DONE)(DONE)(DONE)" ]]; then
+ msgBox "$dup_title" "You cannot create the configuration file until the four first steps are completed."
+ else
+ do_dup_finish
+ break
+ fi
+ ;;
+ esac
+
+ done
}
### Main function
dup_wizard() {
-
+
require_packages duplicity
# Global variables
@@ -507,7 +514,7 @@ dup_wizard() {
# Global variables whose '*' shall not be expanded
set -o noglob
dup_default_includes="/var/spool/cron/crontabs /var/backups /etc /root /home /usr/local/*bin /var/lib/dpkg/status*"
- dup_default_excludes="/home/*/.gnupg /home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads"
+ dup_default_excludes="/home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads"
set +o noglob
dup_main_menu
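
Besides adding the vim modeline and normalizing indentation to three-space soft tabs, the dup.helper.in change above teaches the wizard's generated example config about Amazon S3 destinations. A minimal, hypothetical action config using those new options might look like the following; the bucket name and keys are placeholders, not values from this commit:

   # /etc/backup.d/90.dup -- illustrative fragment only
   [dest]
   incremental = yes
   keep = 60
   desturl = s3+http://my-backup-bucket
   awsaccesskeyid = AKIAEXAMPLEKEYID
   awssecretaccesskey = exampleSecretKey
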
diff --git a/handlers/dup.in b/handlers/dup.in
index aed6030..ffae48c 100644
--- a/handlers/dup.in
+++ b/handlers/dup.in
@@ -26,6 +26,8 @@ setsection dest
getconf incremental yes
getconf keep 60
getconf desturl
+getconf awsaccesskeyid
+getconf awssecretaccesskey
getconf sshoptions
getconf bandwidthlimit 0
getconf desthost
@@ -38,6 +40,9 @@ destdir=${destdir%/}
[ -n "$desturl" -o -n "$destdir" ] || fatal "The destination directory (destdir) must be set when desturl is not used."
[ -n "$include" -o -n "$vsinclude" ] || fatal "No source includes specified"
[ -n "$password" ] || fatal "The password option must be set."
+if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "s3+http" ]; then
+ [ -n "$awsaccesskeyid" -a -n "$awssecretaccesskey" ] || fatal "AWS access keys must be set for S3 backups."
+fi
### VServers
# If vservers are configured, check that the ones listed in $vsnames do exist.
@@ -106,8 +111,8 @@ duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`"
# --sftp-command ourselves
scpoptions="$sshoptions"
-if [ "$bandwidthlimit" =! 0 ]; then
- [ -z "$testurl" ] || warning 'The bandwidthlimit option is not used when desturl is set.'
+if [ "$bandwidthlimit" != 0 ]; then
+ [ -z "$desturl" ] || warning 'The bandwidthlimit option is not used when desturl is set.'
scpoptions="$scpoptions -l $bandwidthlimit"
fi
@@ -189,26 +194,35 @@ fi
set -o noglob
# excludes
+SAVEIFS=$IFS
+IFS=$(echo -en "\n\b")
for i in $exclude; do
str="${i//__star__/*}"
execstr_source="${execstr_source} --exclude '$str'"
done
+IFS=$SAVEIFS
# includes
+SAVEIFS=$IFS
+IFS=$(echo -en "\n\b")
for i in $include; do
[ "$i" != "/" ] || fatal "Sorry, you cannot use 'include = /'"
str="${i//__star__/*}"
execstr_source="${execstr_source} --include '$str'"
done
+IFS=$SAVEIFS
# vsincludes
if [ $usevserver = yes ]; then
for vserver in $vsnames; do
+ SAVEIFS=$IFS
+ IFS=$(echo -en "\n\b")
for vi in $vsinclude; do
str="${vi//__star__/*}"
str="$VROOTDIR/$vserver$str"
execstr_source="${execstr_source} --include '$str'"
done
+ IFS=$SAVEIFS
done
fi
@@ -218,6 +232,12 @@ set +o noglob
execstr_source=${execstr_source//\\*/\\\\\\*}
+### If desturl is an S3 URL export the AWS environment variables
+if [ "`echo $desturl | @AWK@ -F ':' '{print $1}'`" == "s3+http" ]; then
+ export AWS_ACCESS_KEY_ID="$awsaccesskeyid"
+ export AWS_SECRET_ACCESS_KEY="$awssecretaccesskey"
+fi
+
### Cleanup commands (duplicity >= 0.4.4)
# cleanup
@@ -263,18 +283,23 @@ fi
### Backup command
debug "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart"
if [ ! $test ]; then
+ outputfile=`maketemp backupout`
export PASSPHRASE=$password
output=`nice -n $nicelevel \
su -c \
- "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart 2>&1"`
+ "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`
exit_code=$?
+ debug $output
+ cat $outputfile | (while read output ; do
+ info $output
+ done
+ )
if [ $exit_code -eq 0 ]; then
- debug $output
info "Duplicity finished successfully."
else
- debug $output
fatal "Duplicity failed."
fi
+ rm $outputfile
fi
return 0
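
Functionally, the dup.in hunks above do four things: fix the inverted `=!` test on bandwidthlimit, switch the include/exclude loops to a newline IFS so paths containing spaces survive word splitting, capture duplicity's output to a temp file so it can be replayed through info line by line, and export the AWS credentials whenever the destination URL scheme is s3+http. A condensed sketch of that last piece, outside the handler (the desturl value is a placeholder, and plain awk stands in for the handler's @AWK@ substitution):

   # scheme sniffing and credential export, as done in the handler above
   desturl="s3+http://my-backup-bucket"              # placeholder
   scheme=`echo $desturl | awk -F ':' '{print $1}'`
   if [ "$scheme" = "s3+http" ]; then
      # duplicity reads the standard AWS environment variables for S3
      export AWS_ACCESS_KEY_ID="$awsaccesskeyid"
      export AWS_SECRET_ACCESS_KEY="$awssecretaccesskey"
   fi
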
diff --git a/handlers/ldap.helper.in b/handlers/ldap.helper.in
index 9251826..4154cc6 100644
--- a/handlers/ldap.helper.in
+++ b/handlers/ldap.helper.in
@@ -1,9 +1,10 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS ldap:ldap_database_backup"
ldap_create_file() {
-while true; do
+ while true; do
checkBox "ldap action wizard" "check options (slapcat OR ldapsearch)" \
"slapcat" "export ldif using slapcat" yes \
"ldapsearch" "export ldif using ldapsearch" no \
@@ -22,21 +23,21 @@ while true; do
result="$REPLY"
for opt in $result; do
case $opt in
- '"compress"') compress="compress = yes";;
- '"slapcat"')
- method="method = slapcat"
- [ "$_RESTART" == "yes" ] && restart="restart = yes"
- ;;
- '"ldapsearch"')
- method="method = ldapsearch"
- inputBox "ldap action wizard" "ldapsearch requires authentication. Specify here what password file to use. It must have the password with no trailing return and it should not be world readable."
- [ $? = 1 ] && return
- passwordfile="passwordfile = $REPLY"
- inputBox "ldap action wizard" "ldapsearch requires authentication. Specify here what DN to bind as:"
- [ $? = 1 ] && return
- binddn="binddn = $REPLY"
- require_packages ldap-utils
- ;;
+ '"compress"') compress="compress = yes";;
+ '"slapcat"')
+ method="method = slapcat"
+ [ "$_RESTART" == "yes" ] && restart="restart = yes"
+ ;;
+ '"ldapsearch"')
+ method="method = ldapsearch"
+ inputBox "ldap action wizard" "ldapsearch requires authentication. Specify here what password file to use. It must have the password with no trailing return and it should not be world readable."
+ [ $? = 1 ] && return
+ passwordfile="passwordfile = $REPLY"
+ inputBox "ldap action wizard" "ldapsearch requires authentication. Specify here what DN to bind as:"
+ [ $? = 1 ] && return
+ binddn="binddn = $REPLY"
+ require_packages ldap-utils
+ ;;
'"ssl"') ssl="ssl = yes";;
'"tls"') tls="tls = yes";;
esac
@@ -54,9 +55,9 @@ $tls
# conf = /etc/ldap/slapd.conf
# databases = all
EOF
- chmod 600 $next_filename
- return
-done
+ chmod 600 $next_filename
+ return
+ done
}
ldap_wizard() {
@@ -71,7 +72,7 @@ ldap_wizard() {
elif [ "$backend" == "ldbm" ]; then
ldbm=yes
fi
- done
+ done
if [ "$bdb" == "yes" -o "$hdb" == "yes" ]; then
if [ "$ldbm" == "no" ]; then
@@ -80,11 +81,11 @@ ldap_wizard() {
ldap_create_file
fi
elif [ "$ldbm" == "yes" ]; then
- msgBox "ldap action wizard" "It looks like the backend in your slapd.conf is set to LDBM. Because of this, you will have less options (because it is not safe to use slapcat while slapd is running LDBM)."
- _RESTART=yes
- ldap_create_file
+ msgBox "ldap action wizard" "It looks like the backend in your slapd.conf is set to LDBM. Because of this, you will have less options (because it is not safe to use slapcat while slapd is running LDBM)."
+ _RESTART=yes
+ ldap_create_file
else
- msgBox "ldap action wizard" "I couldn't find any supported backend in your slapd.conf. Bailing out."
- return
+ msgBox "ldap action wizard" "I couldn't find any supported backend in your slapd.conf. Bailing out."
+ return
fi
}
diff --git a/handlers/ldap.in b/handlers/ldap.in
index 8ff1ccf..fda24d0 100644
--- a/handlers/ldap.in
+++ b/handlers/ldap.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# openldap backup handler script for backupninja
#
@@ -35,25 +36,25 @@ dbsuffixes=(`@AWK@ 'BEGIN {OFS=":"} /[:space:]*^database[:space:]*\w*/ {db=$2};
if [ "$ldif" == "yes" ]; then
dumpdir="$backupdir"
[ -d $dumpdir ] || mkdir -p $dumpdir
-
+
if [ "$databases" == 'all' ]; then
dbcount=`grep '^database' $conf | wc -l`
let "dbcount = dbcount - 1"
databases=`seq 0 $dbcount`;
- fi
-
+ fi
+
for db in $databases; do
if [ `expr index "$db" "="` == "0" ]; then
- # db is a number, get the suffix.
+ # db is a number, get the suffix.
dbsuffix=${dbsuffixes[$db]/*:/}
else
dbsuffix=$db
fi
- # some databases don't have suffix (like monitor), skip these
+ # some databases don't have suffix (like monitor), skip these
if [ "$dbsuffix" == "" ]; then
continue;
fi
-
+
if [ "$method" == "slapcat" ]; then
execstr="$SLAPCAT -f $conf -b $dbsuffix"
else
@@ -74,23 +75,23 @@ if [ "$ldif" == "yes" ]; then
debug "Shutting down ldap server..."
/etc/init.d/slapd stop
fi
-
- ext=
- if [ "$compress" == "yes" ]; then
- ext=".gz"
- fi
+
+ ext=
+ if [ "$compress" == "yes" ]; then
+ ext=".gz"
+ fi
touch $dumpdir/$dbsuffix.ldif$ext
if [ ! -f $dumpdir/$dbsuffix.ldif$ext ]; then
fatal "Couldn't create ldif dump file: $dumpdir/$dbsuffix.ldif$ext"
fi
-
+
if [ "$compress" == "yes" ]; then
- execstr="$execstr | $GZIP > $dumpdir/$dbsuffix.ldif.gz"
+ execstr="$execstr | $GZIP --rsyncable > $dumpdir/$dbsuffix.ldif.gz"
else
execstr="$execstr > $dumpdir/$dbsuffix.ldif"
fi
debug "$execstr"
- output=`su root -c "$execstr" 2>&1`
+ output=`su root -c "set -o pipefail ; $execstr" 2>&1`
code=$?
if [ "$code" == "0" ]; then
debug $output
@@ -99,13 +100,13 @@ if [ "$ldif" == "yes" ]; then
warning $output
warning "Failed ldif export of $dbsuffix"
fi
-
+
if [ "$restart" == "yes" ]; then
debug "Starting ldap server..."
/etc/init.d/slapd start
fi
fi
- done
+ done
fi
return 0
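
Beyond reindentation, the ldap.in change makes two behavioural tweaks: the dump pipeline now runs under `set -o pipefail`, so a failing slapcat/ldapsearch is no longer masked by the gzip that follows it, and gzip is invoked with --rsyncable so consecutive compressed dumps transfer efficiently over rsync. The pipefail difference in isolation (placeholder command in place of the real dump):

   false | gzip > /dev/null ; echo $?                         # prints 0: gzip's exit status wins
   ( set -o pipefail ; false | gzip > /dev/null ) ; echo $?   # prints 1: the failed stage is reported
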
diff --git a/handlers/maildir.in b/handlers/maildir.in
index 3514153..44959aa 100644
--- a/handlers/maildir.in
+++ b/handlers/maildir.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
###############################################################
#
@@ -6,17 +7,17 @@
# to a remote server. It is designed to be run with low overhead
# in terms of cpu and bandwidth so it runs pretty slow.
# Hardlinking is used to save storage space.
-#
+#
# This handler expects that your maildir directory structure is
# either one of the following:
-#
-# 1. /$srcdir/[a-zA-Z0-9]/$user for example:
+#
+# 1. /$srcdir/[a-zA-Z0-9]/$user for example:
# /var/maildir/a/anarchist
# /var/maildir/a/arthur
# ...
# /var/maildir/Z/Zaphod
# /var/maildir/Z/Zebra
-#
+#
# 2. or the following:
# /var/maildir/domain.org/user1
# /var/maildir/domain.org/user2
@@ -25,7 +26,7 @@
# /var/maildir/anotherdomain.org/user2
# ...
#
-# if the configuration is setup to have keepdaily at 3,
+# if the configuration is setup to have keepdaily at 3,
# keepweekly is 2, and keepmonthly is 1, then each user's
# maildir backup snapshot directory will contain these files:
# daily.1
@@ -41,15 +42,15 @@
# We handle each maildir individually because it becomes very
# unweldy to hardlink and rsync many hundreds of thousands
# of files at once. It is much faster to take on smaller
-# chunks at a time.
+# chunks at a time.
#
-# For the backup rotation to work, destuser must be able to run
+# For the backup rotation to work, destuser must be able to run
# arbitrary bash commands on the desthost.
#
# Any maildir which is deleted from the source will be moved to
-# "deleted" directory in the destination. It is up to you to
+# "deleted" directory in the destination. It is up to you to
# periodically remove this directory or old maildirs in it.
-#
+#
##############################################################
getconf rotate yes
@@ -68,6 +69,7 @@ getconf desthost
getconf destport 22
getconf destuser
getconf destid_file /root/.ssh/id_rsa
+getconf sshoptions
getconf multiconnection notset
@@ -84,40 +86,40 @@ if [ $test ]; then
testflags="--dry-run -v"
fi
-rsyncflags="$testflags -e 'ssh -p $destport -i $destid_file' -r -v --ignore-existing --delete --size-only --bwlimit=$speedlimit"
+rsyncflags="$testflags -e 'ssh -p $destport -i $destid_file $sshoptions' -r -v --ignore-existing --delete --size-only --bwlimit=$speedlimit"
excludes="--exclude '.Trash/\*' --exclude '.Mistakes/\*' --exclude '.Spam/\*'"
##################################################################
### FUNCTIONS
function do_user() {
- local user=$1
- local btype=$2
- local userdir=${3%/}
- local source="$srcdir/$userdir/$user/"
- local target="$destdir/$userdir/$user/$btype.1"
- if [ ! -d $source ]; then
- warning "maildir $source not found"
- return
- fi
-
- debug "syncing"
- ret=`$RSYNC -e "ssh -p $destport -i $destid_file" -r \
- --links --ignore-existing --delete --size-only --bwlimit=$speedlimit \
- --exclude '.Trash/*' --exclude '.Mistakes/*' --exclude '.Spam/*' \
- $source $destuser@$desthost:$target \
- 2>&1`
- ret=$?
- # ignore 0 (success) and 24 (file vanished before it could be copied)
- if [ $ret != 0 -a $ret != 24 ]; then
- warning "rsync $user failed"
- warning " returned: $ret"
- let "failedcount = failedcount + 1"
- if [ $failedcount -gt 100 ]; then
- fatal "100 rsync errors -- something is not working right. bailing out."
- fi
- fi
- ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file "date +%c%n%s > $target/created"
+ local user=$1
+ local btype=$2
+ local userdir=${3%/}
+ local source="$srcdir/$userdir/$user/"
+ local target="$destdir/$userdir/$user/$btype.1"
+ if [ ! -d $source ]; then
+ warning "maildir $source not found"
+ return
+ fi
+
+ debug "syncing"
+ ret=`$RSYNC -e "ssh -p $destport -i $destid_file $sshoptions" -r \
+ --links --ignore-existing --delete --size-only --bwlimit=$speedlimit \
+ --exclude '.Trash/*' --exclude '.Mistakes/*' --exclude '.Spam/*' \
+ $source $destuser@$desthost:$target \
+ 2>&1`
+ ret=$?
+ # ignore 0 (success) and 24 (file vanished before it could be copied)
+ if [ $ret != 0 -a $ret != 24 ]; then
+ warning "rsync $user failed"
+ warning " returned: $ret"
+ let "failedcount = failedcount + 1"
+ if [ $failedcount -gt 100 ]; then
+ fatal "100 rsync errors -- something is not working right. bailing out."
+ fi
+ fi
+ ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions "date +%c%n%s > $target/created"
}
# remove any maildirs from backup which might have been deleted
@@ -125,6 +127,7 @@ function do_user() {
# (actually, it just moved them to the directory "deleted")
function do_remove() {
+<<<<<<< HEAD
local tmp1=`maketemp maildir-tmp-file`
local tmp2=`maketemp maildir-tmp-file`
@@ -142,99 +145,118 @@ function do_remove() {
done
rm $tmp1
rm $tmp2
+=======
+ local tmp1=`maketemp maildir-tmp-file`
+ local tmp2=`maketemp maildir-tmp-file`
+
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost mkdir -p "$destdir/deleted"
+ cd "$srcdir"
+ for userdir in `ls -d1 */`; do
+ ls -1 "$srcdir/$userdir" | sort > $tmp1
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost ls -1 "$destdir/$userdir" | sort > $tmp2
+ for deluser in `join -v 2 $tmp1 $tmp2`; do
+ [ "$deluser" != "" ] || continue
+ info "removing $destuser@$desthost:$destdir/$userdir$deluser/"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost mv "$destdir/$userdir$deluser/" "$destdir/deleted"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost "date +%c%n%s > '$destdir/deleted/$deluser/deleted_on'"
+ done
+ done
+ rm $tmp1
+ rm $tmp2
+>>>>>>> master
}
function do_rotate() {
- [ "$rotate" == "yes" ] || return;
- local user=$1
- local userdir=${2%/}
- local backuproot="$destdir/$userdir/$user"
+ [ "$rotate" == "yes" ] || return;
+ local user=$1
+ local userdir=${2%/}
+ local backuproot="$destdir/$userdir/$user"
(
- ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file <<EOF
+ ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions <<EOF
##### BEGIN REMOTE SCRIPT #####
- seconds_daily=86400
- seconds_weekly=604800
- seconds_monthly=2628000
- keepdaily=$keepdaily
- keepweekly=$keepweekly
- keepmonthly=$keepmonthly
- now=\`date +%s\`
-
- if [ ! -d "$backuproot" ]; then
- echo "Debug: skipping rotate of $user. $backuproot doesn't exist."
- exit
- fi
- for rottype in daily weekly monthly; do
- seconds=\$((seconds_\${rottype}))
-
- dir="$backuproot/\$rottype"
- if [ ! -d \$dir.1 ]; then
- echo "Debug: \$dir.1 does not exist, skipping."
- continue 1
- elif [ ! -f \$dir.1/created ]; then
- echo "Warning: \$dir.1/created does not exist. This backup may be only partially completed. Skipping rotation."
- continue 1
- fi
-
- # Rotate the current list of backups, if we can.
- oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
- #echo "Debug: oldest \$oldest"
- [ "\$oldest" == "" ] && oldest=0
- for (( i=\$oldest; i > 0; i-- )); do
- if [ -d \$dir.\$i ]; then
- if [ -f \$dir.\$i/created ]; then
- created=\`tail -1 \$dir.\$i/created\`
- else
- created=0
- fi
- cutoff_time=\$(( now - (seconds*(i-1)) ))
- if [ ! \$created -gt \$cutoff_time ]; then
- next=\$(( i + 1 ))
- if [ ! -d \$dir.\$next ]; then
- echo "Debug: \$rottype.\$i --> \$rottype.\$next"
- mv \$dir.\$i \$dir.\$next
- date +%c%n%s > \$dir.\$next/rotated
- else
- echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."
- fi
- else
- echo "Debug: skipping rotation of \$dir.\$i because it was created" \$(( (now-created)/86400)) "days ago ("\$(( (now-cutoff_time)/86400))" needed)."
- fi
- fi
- done
- done
-
- max=\$((keepdaily+1))
- if [ \( \$keepweekly -gt 0 -a -d $backuproot/daily.\$max \) -a ! -d $backuproot/weekly.1 ]; then
- echo "Debug: daily.\$max --> weekly.1"
- mv $backuproot/daily.\$max $backuproot/weekly.1
- date +%c%n%s > $backuproot/weekly.1/rotated
- fi
-
- max=\$((keepweekly+1))
- if [ \( \$keepmonthly -gt 0 -a -d $backuproot/weekly.\$max \) -a ! -d $backuproot/monthly.1 ]; then
- echo "Debug: weekly.\$max --> monthly.1"
- mv $backuproot/weekly.\$max $backuproot/monthly.1
- date +%c%n%s > $backuproot/monthly.1/rotated
- fi
-
- for rottype in daily weekly monthly; do
- max=\$((keep\${rottype}+1))
- dir="$backuproot/\$rottype"
- oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
- [ "\$oldest" == "" ] && oldest=0
- # if we've rotated the last backup off the stack, remove it.
- for (( i=\$oldest; i >= \$max; i-- )); do
- if [ -d \$dir.\$i ]; then
- if [ -d $backuproot/rotate.tmp ]; then
- echo "Debug: removing rotate.tmp"
- rm -rf $backuproot/rotate.tmp
- fi
- echo "Debug: moving \$rottype.\$i to rotate.tmp"
- mv \$dir.\$i $backuproot/rotate.tmp
- fi
- done
- done
+ seconds_daily=86400
+ seconds_weekly=604800
+ seconds_monthly=2628000
+ keepdaily=$keepdaily
+ keepweekly=$keepweekly
+ keepmonthly=$keepmonthly
+ now=\`date +%s\`
+
+ if [ ! -d "$backuproot" ]; then
+ echo "Debug: skipping rotate of $user. $backuproot doesn't exist."
+ exit
+ fi
+ for rottype in daily weekly monthly; do
+ seconds=\$((seconds_\${rottype}))
+
+ dir="$backuproot/\$rottype"
+ if [ ! -d \$dir.1 ]; then
+ echo "Debug: \$dir.1 does not exist, skipping."
+ continue 1
+ elif [ ! -f \$dir.1/created ]; then
+ echo "Warning: \$dir.1/created does not exist. This backup may be only partially completed. Skipping rotation."
+ continue 1
+ fi
+
+ # Rotate the current list of backups, if we can.
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ #echo "Debug: oldest \$oldest"
+ [ "\$oldest" == "" ] && oldest=0
+ for (( i=\$oldest; i > 0; i-- )); do
+ if [ -d \$dir.\$i ]; then
+ if [ -f \$dir.\$i/created ]; then
+ created=\`tail -1 \$dir.\$i/created\`
+ else
+ created=0
+ fi
+ cutoff_time=\$(( now - (seconds*(i-1)) ))
+ if [ ! \$created -gt \$cutoff_time ]; then
+ next=\$(( i + 1 ))
+ if [ ! -d \$dir.\$next ]; then
+ echo "Debug: \$rottype.\$i --> \$rottype.\$next"
+ mv \$dir.\$i \$dir.\$next
+ date +%c%n%s > \$dir.\$next/rotated
+ else
+ echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."
+ fi
+ else
+ echo "Debug: skipping rotation of \$dir.\$i because it was created" \$(( (now-created)/86400)) "days ago ("\$(( (now-cutoff_time)/86400))" needed)."
+ fi
+ fi
+ done
+ done
+
+ max=\$((keepdaily+1))
+ if [ \( \$keepweekly -gt 0 -a -d $backuproot/daily.\$max \) -a ! -d $backuproot/weekly.1 ]; then
+ echo "Debug: daily.\$max --> weekly.1"
+ mv $backuproot/daily.\$max $backuproot/weekly.1
+ date +%c%n%s > $backuproot/weekly.1/rotated
+ fi
+
+ max=\$((keepweekly+1))
+ if [ \( \$keepmonthly -gt 0 -a -d $backuproot/weekly.\$max \) -a ! -d $backuproot/monthly.1 ]; then
+ echo "Debug: weekly.\$max --> monthly.1"
+ mv $backuproot/weekly.\$max $backuproot/monthly.1
+ date +%c%n%s > $backuproot/monthly.1/rotated
+ fi
+
+ for rottype in daily weekly monthly; do
+ max=\$((keep\${rottype}+1))
+ dir="$backuproot/\$rottype"
+ oldest=\`find $backuproot -maxdepth 1 -type d -name \$rottype'.*' | @SED@ 's/^.*\.//' | sort -n | tail -1\`
+ [ "\$oldest" == "" ] && oldest=0
+ # if we've rotated the last backup off the stack, remove it.
+ for (( i=\$oldest; i >= \$max; i-- )); do
+ if [ -d \$dir.\$i ]; then
+ if [ -d $backuproot/rotate.tmp ]; then
+ echo "Debug: removing rotate.tmp"
+ rm -rf $backuproot/rotate.tmp
+ fi
+ echo "Debug: moving \$rottype.\$i to rotate.tmp"
+ mv \$dir.\$i $backuproot/rotate.tmp
+ fi
+ done
+ done
####### END REMOTE SCRIPT #######
EOF
) | (while read a; do passthru $a; done)
@@ -243,128 +265,127 @@ EOF
function setup_remote_dirs() {
- local user=$1
- local backuptype=$2
- local userdir=${3%/}
- local dir="$destdir/$userdir/$user/$backuptype"
- local tmpdir="$destdir/$userdir/$user/rotate.tmp"
+ local user=$1
+ local backuptype=$2
+ local userdir=${3%/}
+ local dir="$destdir/$userdir/$user/$backuptype"
+ local tmpdir="$destdir/$userdir/$user/rotate.tmp"
(
- ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file <<EOF
- if [ ! -d $destdir ]; then
- echo "Fatal: Destination directory $destdir does not exist on host $desthost."
- exit 1
- elif [ -d $dir.1 ]; then
- if [ -f $dir.1/created ]; then
- echo "Warning: $dir.1 already exists. Overwriting contents."
- else
- echo "Warning: we seem to be resuming a partially written $dir.1"
- fi
- else
- if [ -d $tmpdir ]; then
- mv $tmpdir $dir.1
- if [ \$? == 1 ]; then
- echo "Fatal: could mv $destdir/rotate.tmp $dir.1 on host $desthost"
- exit 1
- fi
- else
- mkdir --parents $dir.1
- if [ \$? == 1 ]; then
- echo "Fatal: could not create directory $dir.1 on host $desthost"
- exit 1
- fi
- fi
- if [ -d $dir.2 ]; then
- echo "Debug: update links $backuptype.2 --> $backuptype.1"
- cp -alf $dir.2/. $dir.1
- #if [ \$? == 1 ]; then
- # echo "Fatal: could not create hard links to $dir.1 on host $desthost"
- # exit 1
- #fi
- fi
- fi
- [ -f $dir.1/created ] && rm $dir.1/created
- [ -f $dir.1/rotated ] && rm $dir.1/rotated
- exit 0
+ ssh -T -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions <<EOF
+ if [ ! -d $destdir ]; then
+ echo "Fatal: Destination directory $destdir does not exist on host $desthost."
+ exit 1
+ elif [ -d $dir.1 ]; then
+ if [ -f $dir.1/created ]; then
+ echo "Warning: $dir.1 already exists. Overwriting contents."
+ else
+ echo "Warning: we seem to be resuming a partially written $dir.1"
+ fi
+ else
+ if [ -d $tmpdir ]; then
+ mv $tmpdir $dir.1
+ if [ \$? == 1 ]; then
+ echo "Fatal: could mv $destdir/rotate.tmp $dir.1 on host $desthost"
+ exit 1
+ fi
+ else
+ mkdir --parents $dir.1
+ if [ \$? == 1 ]; then
+ echo "Fatal: could not create directory $dir.1 on host $desthost"
+ exit 1
+ fi
+ fi
+ if [ -d $dir.2 ]; then
+ echo "Debug: update links $backuptype.2 --> $backuptype.1"
+ cp -alf $dir.2/. $dir.1
+ #if [ \$? == 1 ]; then
+ # echo "Fatal: could not create hard links to $dir.1 on host $desthost"
+ # exit 1
+ #fi
+ fi
+ fi
+ [ -f $dir.1/created ] && rm $dir.1/created
+ [ -f $dir.1/rotated ] && rm $dir.1/rotated
+ exit 0
EOF
) | (while read a; do passthru $a; done)
- if [ $? == 1 ]; then exit; fi
+ if [ $? == 1 ]; then exit; fi
}
function start_mux() {
- if [ "$multiconnection" == "yes" ]; then
- debug "Starting dummy ssh connection"
- ssh -p $destport -i $destid_file $destuser@$desthost sleep 1d &
- sleep 1
- fi
+ if [ "$multiconnection" == "yes" ]; then
+ debug "Starting dummy ssh connection"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost sleep 1d &
+ sleep 1
+ fi
}
function end_mux() {
- if [ "$multiconnection" == "yes" ]; then
- debug "Stopping dummy ssh connection"
- ssh -p $destport -i $destid_file $destuser@$desthost pkill sleep
- fi
+ if [ "$multiconnection" == "yes" ]; then
+ debug "Stopping dummy ssh connection"
+ ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost pkill sleep
+ fi
}
###
##################################################################
# see if we can login
-debug "ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file 'echo -n 1'"
+debug "ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions 'echo -n 1'"
if [ ! $test ]; then
- result=`ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file 'echo -n 1' 2>&1`
- if [ "$result" != "1" ]; then
- fatal "Can't connect to $desthost as $destuser using $destid_file."
- fi
+ result=`ssh -o PasswordAuthentication=no $desthost -l $destuser -i $destid_file $sshoptions 'echo -n 1' 2>&1`
+ if [ "$result" != "1" ]; then
+ fatal "Can't connect to $desthost as $destuser using $destid_file."
+ fi
fi
end_mux
start_mux
## SANITY CHECKS ##
-status=`ssh -p $destport -i $destid_file $destuser@$desthost "[ -d \"$destdir\" ] && echo 'ok'"`
+status=`ssh -p $destport -i $destid_file $sshoptions $destuser@$desthost "[ -d \"$destdir\" ] && echo 'ok'"`
if [ "$status" != "ok" ]; then
- end_mux
- fatal "Destination directory $destdir doesn't exist!"
- exit
+ end_mux
+ fatal "Destination directory $destdir doesn't exist!"
+ exit
fi
### REMOVE OLD MAILDIRS ###
if [ "$remove" == "yes" ]; then
- do_remove
+ do_remove
fi
### MAKE BACKUPS ###
if [ "$backup" == "yes" ]; then
- if [ $keepdaily -gt 0 ]; then btype=daily
- elif [ $keepweekly -gt 0 ]; then btype=weekly
- elif [ $keepmonthly -gt 0 ]; then btype=monthly
- else fatal "keeping no backups"; fi
-
- if [ "$testuser" != "" ]; then
- cd "$srcdir/${user:0:1}"
- do_rotate $testuser
- setup_remote_dirs $testuser $btype
- do_user $testuser $btype
- else
- [ -d "$srcdir" ] || fatal "directory $srcdir not found."
- cd "$srcdir"
- for userdir in `ls -d1 */`; do
- [ -d "$srcdir/$userdir" ] || fatal "directory $srcdir/$userdir not found."
- cd "$srcdir/$userdir"
- debug $userdir
- for user in `ls -1`; do
- [ "$user" != "" ] || continue
- debug "$user $userdir"
- do_rotate $user $userdir
- setup_remote_dirs $user $btype $userdir
- do_user $user $btype $userdir
- done
- done
- fi
+ if [ $keepdaily -gt 0 ]; then btype=daily
+ elif [ $keepweekly -gt 0 ]; then btype=weekly
+ elif [ $keepmonthly -gt 0 ]; then btype=monthly
+ else fatal "keeping no backups"; fi
+
+ if [ "$testuser" != "" ]; then
+ cd "$srcdir/${user:0:1}"
+ do_rotate $testuser
+ setup_remote_dirs $testuser $btype
+ do_user $testuser $btype
+ else
+ [ -d "$srcdir" ] || fatal "directory $srcdir not found."
+ cd "$srcdir"
+ for userdir in `ls -d1 */`; do
+ [ -d "$srcdir/$userdir" ] || fatal "directory $srcdir/$userdir not found."
+ cd "$srcdir/$userdir"
+ debug $userdir
+ for user in `ls -1`; do
+ [ "$user" != "" ] || continue
+ debug "$user $userdir"
+ do_rotate $user $userdir
+ setup_remote_dirs $user $btype $userdir
+ do_user $user $btype $userdir
+ done
+ done
+ fi
fi
end_mux
-
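
The maildir.in rewrite reindents the handler and threads a new sshoptions setting into every ssh and rsync invocation; note that the do_remove hunk also carries the literal merge-conflict markers listed in the commit message. A hypothetical action config exercising the new option follows; host names and ssh flags are illustrative only, not from this commit:

   # /etc/backup.d/30.maildir -- illustrative fragment only
   srcdir = /var/maildir
   destdir = /var/backups/maildir
   desthost = backup.example.org
   destuser = backup
   destid_file = /root/.ssh/id_rsa
   sshoptions = -o StrictHostKeyChecking=no -o ConnectTimeout=30
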
diff --git a/handlers/makecd.helper.in b/handlers/makecd.helper.in
index f83b541..ad0c4aa 100644
--- a/handlers/makecd.helper.in
+++ b/handlers/makecd.helper.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS makecd:makecd_backup"
wizardname="makecd action wizard"
@@ -7,59 +8,59 @@ declare -a makecd_excludes
makecd_wizard() {
- inputBox "$wizardname" "specify a burner type cd or dvd:"
- [ $? = 1 ] && return
- burnertype="burnertype = $REPLY"
-
- booleanBox "$wizardname" "Make iso image only? or burn"
- if [ $? = 0 ]; then
- isoonly="isoonly = yes"
- else
- isoonly="isoonly = no"
- fi
-
- # backupdir
- inputBox "$wizardname" "Directory where to store the backups:"
- [ $? = 1 ] && return
- backupdir="backupdir = $REPLY"
-
- inputBox "$wizardname" "what name to give to the image file?"
- [ $? = 1 ] && return
- imagefile="imagefile = $REPLY"
-
- inputBox "$wizardname" "specify a burner device:"
- [ $? = 1 ] && return
- device="device = $REPLY"
-
- # target - root of system to be included
- inputBox "$wizardname" "root of filesystem for burn:"
- [ $? = 1 ] && return
- target="target = $REPLY"
-
-
- # excludes
-
- formBegin "$wizardname: excludes"
- for ((i=0; i < ${#makecd_excludes[@]} ; i++)); do
- formItem exclude ${makecd_excludes[$i]}
- done
- formItem exclude
- formItem exclude
- formItem exclude
- formItem exclude
- formItem exclude
- formItem exclude
- formItem exclude
- formItem exclude
- formItem exclude
- formDisplay
- [ $? = 1 ] && return;
-
- unset makecd_excludes
- makecd_excludes=($REPLY)
-
- get_next_filename $configdirectory/20.makecd
- cat >> $next_filename <<EOF
+ inputBox "$wizardname" "specify a burner type cd or dvd:"
+ [ $? = 1 ] && return
+ burnertype="burnertype = $REPLY"
+
+ booleanBox "$wizardname" "Make iso image only? or burn"
+ if [ $? = 0 ]; then
+ isoonly="isoonly = yes"
+ else
+ isoonly="isoonly = no"
+ fi
+
+ # backupdir
+ inputBox "$wizardname" "Directory where to store the backups:"
+ [ $? = 1 ] && return
+ backupdir="backupdir = $REPLY"
+
+ inputBox "$wizardname" "what name to give to the image file?"
+ [ $? = 1 ] && return
+ imagefile="imagefile = $REPLY"
+
+ inputBox "$wizardname" "specify a burner device:"
+ [ $? = 1 ] && return
+ device="device = $REPLY"
+
+ # target - root of system to be included
+ inputBox "$wizardname" "root of filesystem for burn:"
+ [ $? = 1 ] && return
+ target="target = $REPLY"
+
+
+ # excludes
+
+ formBegin "$wizardname: excludes"
+ for ((i=0; i < ${#makecd_excludes[@]} ; i++)); do
+ formItem exclude ${makecd_excludes[$i]}
+ done
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formItem exclude
+ formDisplay
+ [ $? = 1 ] && return;
+
+ unset makecd_excludes
+ makecd_excludes=($REPLY)
+
+ get_next_filename $configdirectory/20.makecd
+ cat >> $next_filename <<EOF
# TYP is cd or dvd AS WELL AS the disk inside!!
$burnertype
@@ -88,10 +89,10 @@ $target
# exclude = /dev
EOF
- for ((j=0; j < ${#makecd_excludes[@]} ; j++)); do
- echo "exclude = ${makecd_excludes[$j]}" >> $next_filename
- done
-
+ for ((j=0; j < ${#makecd_excludes[@]} ; j++)); do
+ echo "exclude = ${makecd_excludes[$j]}" >> $next_filename
+ done
+
chmod 600 $next_filename
}
diff --git a/handlers/makecd.in b/handlers/makecd.in
index 1a95d6d..d44bba3 100644
--- a/handlers/makecd.in
+++ b/handlers/makecd.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# burncd handler script for backupninja
#
@@ -31,7 +32,7 @@ DVDINFO="/usr/bin/dvd+rw-mediainfo"
[ -x "$CDRDAO" ] || debug 3 "echo executable $CDRDAO not present"
if [ "$isoonly" == "no" ]; then
- [ -e $device ] || fatal "No Burner device available"
+ [ -e $device ] || fatal "No Burner device available"
fi
outputfile="$backupdir/$imagefile"
@@ -40,7 +41,7 @@ execstr="nice -n $nicelevel $MKISOFS --quiet -R -o $outputfile "
str=""
# excludes
for i in $exclude; do
- str=" -x ${i}$str"
+ str=" -x ${i}$str"
done
debug 0 "echo $str "
@@ -50,39 +51,39 @@ debug 0 "echo $execstr "
output=` $execstr 2>&1 `
code=$?
if [ "$code" == "0" ]; then
- debug $output
- info "Successfully finished creation of iso"
+ debug $output
+ info "Successfully finished creation of iso"
else
- warning $output
- warning "Failed to create iso"
+ warning $output
+ warning "Failed to create iso"
fi
if [ "$isoonly" == "no" ]; then
- if [ "$burnertype" == "cd" ]; then
- # burning iso to CD
- $CDRECORD -v gracetime=2 dev=$device speed=8 -dao -data $outputfile
- code=$?
- if [ "$code" == "0" ]; then
- debug $output
- info "Successfully burned CD"
- else
- warning $output
- warning "Failed to create CD"
- fi
- fi
- if [ "$burnertype" == "dvd" ]; then
- # burning iso dvd
- $GROWISOFS -speed=2 -Z $device=$outputfile -use-the-force-luke=notray -use-the-force-luke=tty
- code=$?
- if [ "$code" == "0" ]; then
- debug $output
- info "Successfully burned DVD"
- else
- warning $output
- warning "Failed to create DVD"
- fi
- fi
+ if [ "$burnertype" == "cd" ]; then
+ # burning iso to CD
+ $CDRECORD -v gracetime=2 dev=$device speed=8 -dao -data $outputfile
+ code=$?
+ if [ "$code" == "0" ]; then
+ debug $output
+ info "Successfully burned CD"
+ else
+ warning $output
+ warning "Failed to create CD"
+ fi
+ fi
+ if [ "$burnertype" == "dvd" ]; then
+ # burning iso dvd
+ $GROWISOFS -speed=2 -Z $device=$outputfile -use-the-force-luke=notray -use-the-force-luke=tty
+ code=$?
+ if [ "$code" == "0" ]; then
+ debug $output
+ info "Successfully burned DVD"
+ else
+ warning $output
+ warning "Failed to create DVD"
+ fi
+ fi
fi
return 0
diff --git a/handlers/mysql.helper.in b/handlers/mysql.helper.in
index 9622d41..d42bc7f 100644
--- a/handlers/mysql.helper.in
+++ b/handlers/mysql.helper.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS mysql:mysql_database_backup"
@@ -13,43 +14,43 @@ do_mysql_databases() {
while [ -z "$REPLY" ]; do
formBegin "$mysql_title: databases"
formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
formDisplay
[ $? = 0 ] || return 1
mysql_databases="databases = "
for i in $REPLY; do
- [ -n "$i" ] && mysql_databases="$mysql_databases $i"
+ [ -n "$i" ] && mysql_databases="$mysql_databases $i"
done
done
}
do_mysql_password() {
- inputBox "$mysql_title" "specify a mysql user:"
- [ $? = 1 ] && return
- user=$REPLY
- inputBox "$mysql_title" "specify the mysql user's password:"
- [ $? = 1 ] && return
- password=$REPLY
- do_mysql_final "dbusername = $user\ndbpassword = $password"
+ inputBox "$mysql_title" "specify a mysql user:"
+ [ $? = 1 ] && return
+ user=$REPLY
+ inputBox "$mysql_title" "specify the mysql user's password:"
+ [ $? = 1 ] && return
+ password=$REPLY
+ do_mysql_final "dbusername = $user\ndbpassword = $password"
}
do_mysql_debian() {
- _DISABLE_HOTCOPY=yes
- do_mysql_final "configfile = /etc/mysql/debian.cnf"
+ _DISABLE_HOTCOPY=yes
+ do_mysql_final "configfile = /etc/mysql/debian.cnf"
}
do_mysql_user() {
- inputBox "$mysql_title" "what system user does mysql backup use?"
- [ $? = 1 ] && return
- do_mysql_final "user = $REPLY"
+ inputBox "$mysql_title" "what system user does mysql backup use?"
+ [ $? = 1 ] && return
+ do_mysql_final "user = $REPLY"
}
do_mysql_final() {
@@ -69,18 +70,18 @@ do_mysql_final() {
hotcopy="hotcopy = no"
fi
- [ $status = 1 ] && return;
+ [ $status = 1 ] && return;
result="$REPLY"
compress="compress = no"
for opt in $result; do
case $opt in
- '"sqldump"') sqldump="sqldump = yes";;
- '"hotcopy"') hotcopy="hotcopy = yes";;
- '"compress"') compress="compress = yes";;
+ '"sqldump"') sqldump="sqldump = yes";;
+ '"hotcopy"') hotcopy="hotcopy = yes";;
+ '"compress"') compress="compress = yes";;
esac
done
get_next_filename $configdirectory/20.mysql
-
+
cat >> $next_filename <<EOF
### backupninja MySQL config file ###
@@ -109,7 +110,7 @@ EOF
$mysql_backupdir
# databases = <all | db1 db2 db3 > (default = all)
-# which databases to backup. should either be the word 'all' or a
+# which databases to backup. should either be the word 'all' or a
# space separated list of database names.
$mysql_databases
@@ -117,43 +118,43 @@ EOF
if [ $host_or_vservers == vservers ]
then
- cat >> $next_filename <<EOF
+ cat >> $next_filename <<EOF
#
# vsname = <vserver> (no default)
-# vsname indicates which vserver to operate on, this is only used if
+# vsname indicates which vserver to operate on, this is only used if
# vserver is set to yes in /etc/backupninja.conf
# NOTE: if you do not specify a vsname the host will be operated on
-# alsoNOTE: if operating on a vserver, $VROOTDIR will be
+# alsoNOTE: if operating on a vserver, $VROOTDIR will be
# prepended to backupdir.
EOF
echo -e "$mysql_vsname\n" >> $next_filename
fi
echo -e $@ >> $next_filename
-
+
chmod 600 $next_filename
}
mysql_wizard() {
-
+
# Global variables
mysql_title="MySQL action wizard"
-
+
# backup the host system or a Vserver?
choose_host_or_one_vserver "$mysql_title"
[ $? = 0 ] || return 1
if [ $host_or_vservers == vservers ]
then
- do_mysql_vserver
- [ $? = 0 ] || return 1
+ do_mysql_vserver
+ [ $? = 0 ] || return 1
fi
-
+
# backupdir
if [ $host_or_vservers == vservers ]
then
- inputBox "$mysql_title" "Directory where to store the backups:`echo \"\n(Relative to chosen vserver's root directory)\"`" "/var/backups/mysql"
+ inputBox "$mysql_title" "Directory where to store the backups:`echo \"\n(Relative to chosen vserver's root directory)\"`" "/var/backups/mysql"
else
- inputBox "$mysql_title" "Directory where to store the backups" "/var/backups/mysql"
+ inputBox "$mysql_title" "Directory where to store the backups" "/var/backups/mysql"
fi
[ $? = 1 ] && return
mysql_backupdir="backupdir = $REPLY"
@@ -166,23 +167,23 @@ mysql_wizard() {
do_mysql_databases
[ $? = 0 ] || return 1
fi
-
+
while true; do
_DISABLE_HOTCOPY=
menuBoxHelpFile "$mysql_title" "choose a mysql authentication method:" \
- user "change to a linux user first." \
- password "manually specify mysql user and password." \
- debian "use default mysql user debian-sys-maint."
+ user "change to a linux user first." \
+ password "manually specify mysql user and password." \
+ debian "use default mysql user debian-sys-maint."
status=$?
if [ $status = 2 ]; then
- # show help.
- helptmp="/tmp/backupninja.help.$$"
- cat > $helptmp <<EOF
+ # show help.
+ helptmp="/tmp/backupninja.help.$$"
+ cat > $helptmp <<EOF
To connect to mysql, backupninja must authenticate.
There are three possible authentication methods:
USER
-With this method, you specify a system user. Backupninja will
+With this method, you specify a system user. Backupninja will
then become this user before running mysqldump or mysqlhotcopy.
The result is that ~/.my.cnf is used for authentication.
@@ -194,11 +195,11 @@ DEBIAN
With this method, we use the debian-sys-maint user which is
already defined in /etc/mysql/debian.cnf. If you are running
debian, this is recommended, because no further configuration
-is needed. The drawback is that this is incompatible with
+is needed. The drawback is that this is incompatible with
mysqlhotcopy: you must use mysqldump.
EOF
- dialog --textbox $helptmp 0 0
- rm $helptmp
+ dialog --textbox $helptmp 0 0
+ rm $helptmp
fi
[ $status = 1 ] && return;
@@ -208,5 +209,5 @@ EOF
"password") do_mysql_password;return;;
"debian") do_mysql_debian;return;;
esac
- done
+ done
}
diff --git a/handlers/mysql.in b/handlers/mysql.in
index b304833..3488c51 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# mysql handler script for backupninja
#
@@ -10,6 +11,7 @@ getconf nodata
getconf dbhost localhost
getconf hotcopy no
getconf sqldump no
+getconf sqldumpoptions "--lock-tables --complete-insert --add-drop-table --quick --quote-names"
getconf compress yes
getconf vsname
@@ -46,9 +48,9 @@ fi
ignore=''
for i in $ignores $nodata; do
- ignore="$ignore --ignore-table=$i"
+ ignore="$ignore --ignore-table=$i"
done
-
+
# create backup dirs, $vroot will be empty if no vsname was specified
# and we will instead proceed to operate on the host
[ -d $vroot$backupdir ] || mkdir -p $vroot$backupdir
@@ -58,11 +60,11 @@ dumpdir="$backupdir/sqldump"
if [ $usevserver = yes ]
then
- [ "$sqldump" == "no" -o -d $vroot$dumpdir ] || $VSERVER $vsname exec mkdir -p $dumpdir
- [ "$hotcopy" == "no" -o -d $vroot$hotdir ] || $VSERVER $vsname exec mkdir -p $hotdir
+ [ "$sqldump" == "no" -o -d $vroot$dumpdir ] || $VSERVER $vsname exec mkdir -p $dumpdir
+ [ "$hotcopy" == "no" -o -d $vroot$hotdir ] || $VSERVER $vsname exec mkdir -p $hotdir
else
- [ "$sqldump" == "no" -o -d $dumpdir ] || mkdir -p $dumpdir
- [ "$hotcopy" == "no" -o -d $hotdir ] || mkdir -p $hotdir
+ [ "$sqldump" == "no" -o -d $dumpdir ] || mkdir -p $dumpdir
+ [ "$hotcopy" == "no" -o -d $hotdir ] || mkdir -p $hotdir
fi
#######################################################################
@@ -86,35 +88,35 @@ defaultsfile=""
if [ "$dbusername" != "" -a "$dbpassword" != "" ]
then
- if [ $usevserver = yes ]
- then
- home=`$VSERVER $vsname exec getent passwd "root" | @AWK@ -F: '{print $6}'`
- else
- home=`getent passwd "root" | @AWK@ -F: '{print $6}'`
- fi
+ if [ $usevserver = yes ]
+ then
+ home=`$VSERVER $vsname exec getent passwd "root" | @AWK@ -F: '{print $6}'`
+ else
+ home=`getent passwd "root" | @AWK@ -F: '{print $6}'`
+ fi
- [ -d $home ] || fatal "Can't find root's home directory ($home)."
+ [ -d $home ] || fatal "Can't find root's home directory ($home)."
- mycnf="$home/.my.cnf"
+ mycnf="$home/.my.cnf"
- if [ $usevserver = yes ]
- then
+ if [ $usevserver = yes ]
+ then
workcnf="$vroot$mycnf"
- else
+ else
workcnf="$mycnf"
- fi
+ fi
- if [ -f $workcnf ]
- then
+ if [ -f $workcnf ]
+ then
# rename temporarily
tmpcnf="$workcnf.disable"
debug "mv $workcnf $tmpcnf"
mv $workcnf $tmpcnf
- fi
+ fi
- oldmask=`umask`
- umask 077
- cat > $workcnf <<EOF
+ oldmask=`umask`
+ umask 077
+ cat > $workcnf <<EOF
# auto generated backupninja mysql conf
[mysql]
host=$dbhost
@@ -136,37 +138,37 @@ host=$dbhost
user=$dbusername
password="$dbpassword"
EOF
- umask $oldmask
- defaultsfile="--defaults-extra-file=$mycnf"
+ umask $oldmask
+ defaultsfile="--defaults-extra-file=$mycnf"
fi
# if a user is not set, use $configfile, otherwise use $mycnf
if [ "$user" == "" ]; then
- user=root;
- defaultsfile="--defaults-extra-file=$configfile"
+ user=root;
+ defaultsfile="--defaults-extra-file=$configfile"
else
- userset=true;
- if [ $usevserver = yes ]
- then
- userhome=`$VSERVER $vsname exec getent passwd "$user" | @AWK@ -F: '{print $6}'`
- if [ $? -eq 2 ]
- then
- fatal "User $user not found in /etc/passwd"
- fi
- debug "User home set to: $vroot$userhome"
- [ -f $vroot$userhome/.my.cnf ] || fatal "Can't find config file in $userhome/.my.cnf"
- else
- userhome=`getent passwd "$user" | @AWK@ -F: '{print $6}'`
- if [ $? -eq 2 ]
- then
- fatal "User $user not found in /etc/passwd"
- fi
- debug "User home set to: $userhome"
- [ -f $userhome/.my.cnf ] || fatal "Can't find config file in $userhome/.my.cnf"
- fi
+ userset=true;
+ if [ $usevserver = yes ]
+ then
+ userhome=`$VSERVER $vsname exec getent passwd "$user" | @AWK@ -F: '{print $6}'`
+ if [ $? -eq 2 ]
+ then
+ fatal "User $user not found in /etc/passwd"
+ fi
+ debug "User home set to: $vroot$userhome"
+ [ -f $vroot$userhome/.my.cnf ] || fatal "Can't find config file in $userhome/.my.cnf"
+ else
+ userhome=`getent passwd "$user" | @AWK@ -F: '{print $6}'`
+ if [ $? -eq 2 ]
+ then
+ fatal "User $user not found in /etc/passwd"
+ fi
+ debug "User home set to: $userhome"
+ [ -f $userhome/.my.cnf ] || fatal "Can't find config file in $userhome/.my.cnf"
+ fi
- defaultsfile="--defaults-extra-file=$userhome/.my.cnf"
- debug "using $defaultsfile"
+ defaultsfile="--defaults-extra-file=$userhome/.my.cnf"
+ debug "using $defaultsfile"
fi
#######################################################################
@@ -174,55 +176,55 @@ fi
if [ "$hotcopy" == "yes" ]
then
- info "Initializing hotcopy method"
- if [ "$databases" == "all" ]
- then
- if [ $usevserver = yes ]
- then
- info "dbhost: $dbhost"
- execstr="$VSERVER $vsname exec $MYSQLHOTCOPY -h $dbhost --quiet --allowold --regexp /.\*/./.\*/ $hotdir"
- else
- execstr="$MYSQLHOTCOPY --quiet --allowold --regexp /.\*/./.\*/ $hotdir"
- fi
- debug "su $user -c \"$execstr\""
- if [ ! $test ]
- then
- output=`su $user -c "$execstr" 2>&1`
- code=$?
- if [ "$code" == "0" ]
- then
- debug $output
- info "Successfully finished hotcopy of all mysql databases"
- else
- warning $output
- warning "Failed to hotcopy all mysql databases"
- fi
- fi
- else
- for db in $databases
- do
- if [ $usevserver = yes ]
- then
- execstr="$VSERVER $vsname exec $MYSQLHOTCOPY --allowold $db $hotdir"
- else
- execstr="$MYSQLHOTCOPY --allowold $db $hotdir"
- fi
- debug 'su $user -c \"$execstr\"'
- if [ ! $test ]
- then
- output=`su $user -c "$execstr" 2>&1`
- code=$?
- if [ "$code" == "0" ]
- then
- debug $output
- info "Successfully finished hotcopy of mysql database $db"
- else
- warning $output
- warning "Failed to hotcopy mysql database $db"
- fi
- fi
- done
- fi
+ info "Initializing hotcopy method"
+ if [ "$databases" == "all" ]
+ then
+ if [ $usevserver = yes ]
+ then
+ info "dbhost: $dbhost"
+ execstr="$VSERVER $vsname exec $MYSQLHOTCOPY -h $dbhost --quiet --allowold --regexp /.\*/./.\*/ $hotdir"
+ else
+ execstr="$MYSQLHOTCOPY --quiet --allowold --regexp /.\*/./.\*/ $hotdir"
+ fi
+ debug "su $user -c \"$execstr\""
+ if [ ! $test ]
+ then
+ output=`su $user -c "$execstr" 2>&1`
+ code=$?
+ if [ "$code" == "0" ]
+ then
+ debug $output
+ info "Successfully finished hotcopy of all mysql databases"
+ else
+ warning $output
+ warning "Failed to hotcopy all mysql databases"
+ fi
+ fi
+ else
+ for db in $databases
+ do
+ if [ $usevserver = yes ]
+ then
+ execstr="$VSERVER $vsname exec $MYSQLHOTCOPY --allowold $db $hotdir"
+ else
+ execstr="$MYSQLHOTCOPY --allowold $db $hotdir"
+ fi
+ debug 'su $user -c \"$execstr\"'
+ if [ ! $test ]
+ then
+ output=`su $user -c "$execstr" 2>&1`
+ code=$?
+ if [ "$code" == "0" ]
+ then
+ debug $output
+ info "Successfully finished hotcopy of mysql database $db"
+ else
+ warning $output
+ warning "Failed to hotcopy mysql database $db"
+ fi
+ fi
+ done
+ fi
fi
##########################################################################
@@ -230,97 +232,97 @@ fi
if [ "$sqldump" == "yes" ]
then
- info "Initializing SQL dump method"
- if [ "$databases" == "all" ]
- then
- if [ $usevserver = yes ]
- then
- debug 'echo show databases | $VSERVER $vsname exec su $user -c \"$MYSQL $defaultsfile\" | grep -v Database'
- databases=`echo 'show databases' | $VSERVER $vsname exec su $user -c "$MYSQL $defaultsfile" | grep -v Database`
- if [ $? -ne 0 ]
- then
- fatal "Authentication problem, maybe user/password is wrong or mysqld is not running?"
- fi
- else
- databases=$(su $user -c "$MYSQL $defaultsfile -N -B -e 'show databases'" | sed 's/|//g;/\+----/d')
- if [ $? -ne 0 ]
- then
- fatal "Authentication problem, maybe user/password is wrong or mysqld is not running?"
- fi
- fi
- fi
+ info "Initializing SQL dump method"
+ if [ "$databases" == "all" ]
+ then
+ if [ $usevserver = yes ]
+ then
+ debug 'set -o pipefail ; echo show databases | $VSERVER $vsname exec su $user -c \"$MYSQL $defaultsfile\" | grep -v Database'
+ databases=`set -o pipefail ; echo 'show databases' | $VSERVER $vsname exec su $user -c "$MYSQL $defaultsfile" | grep -v Database`
+ if [ $? -ne 0 ]
+ then
+ fatal "Authentication problem, maybe user/password is wrong or mysqld is not running?"
+ fi
+ else
+ databases=$(set -o pipefail ; su $user -c "$MYSQL $defaultsfile -N -B -e 'show databases'" | sed 's/|//g;/\+----/d')
+ if [ $? -ne 0 ]
+ then
+ fatal "Authentication problem, maybe user/password is wrong or mysqld is not running?"
+ fi
+ fi
+ fi
- for db in $databases
- do
- DUMP_BASE="$MYSQLDUMP $defaultsfile --lock-tables --complete-insert --add-drop-table --quick --quote-names"
+ for db in $databases
+ do
+ DUMP_BASE="$MYSQLDUMP $defaultsfile $sqldumpoptions"
- # Dumping structure and data
- DUMP="$DUMP_BASE $ignore $db"
+ # Dumping structure and data
+ DUMP="$DUMP_BASE $ignore $db"
- # If requested, dump only the table structure for this database
- if echo "$nodata" | grep -E '(^|[[:space:]])'"$db\." >/dev/null
- then
- # Get the structure of the tables, without data
- DUMP_STRUCT="$DUMP_BASE --no-data $db"
- for qualified_table in $nodata
- do
- table=$( expr match "$qualified_table" "$db\.\([^\w]*\)" )
- DUMP_STRUCT="$DUMP_STRUCT $table"
- done
- DUMP="( $DUMP; $DUMP_STRUCT )"
- fi
- if [ $usevserver = yes ]
- then
- # Test to make sure mysqld is running, if it is not sqldump will not work
- $VSERVER $vsname exec su $user -c "$MYSQLADMIN $defaultsfile ping"
- if [ $? -ne 0 ]; then
- fatal "Either you have an authentication problem, or mysqld doesn't appear to be running!"
- fi
- if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec $DUMP | $GZIP > $vroot$dumpdir/${db}.sql.gz"
- else
- execstr="$VSERVER $vsname exec $DUMP -r $vroot$dumpdir/${db}.sql"
- fi
- else
- # Test to make sure mysqld is running, if it is not sqldump will not work
- su $user -c "$MYSQLADMIN $defaultsfile ping"
- if [ $? -ne 0 ]; then
- fatal "Either you have an authentication problem, or mysqld doesn't appear to be running!"
- fi
- if [ "$compress" == "yes" ]; then
- execstr="$DUMP | $GZIP > $dumpdir/${db}.sql.gz"
- else
- execstr="$DUMP -r $dumpdir/${db}.sql"
- fi
- fi
- debug "su $user -c \"$execstr\""
- if [ ! $test ]
- then
- output=`su $user -c "$execstr" 2>&1`
- code=$?
- if [ "$code" == "0" ]
- then
- debug $output
- info "Successfully finished dump of mysql database $db"
- else
- warning $output
- warning "Failed to dump mysql databases $db"
- fi
- fi
- done
+ # If requested, dump only the table structure for this database
+ if echo "$nodata" | grep -E '(^|[[:space:]])'"$db\." >/dev/null
+ then
+ # Get the structure of the tables, without data
+ DUMP_STRUCT="$DUMP_BASE --no-data $db"
+ for qualified_table in $nodata
+ do
+ table=$( expr match "$qualified_table" "$db\.\([^\w]*\)" )
+ DUMP_STRUCT="$DUMP_STRUCT $table"
+ done
+ DUMP="( $DUMP; $DUMP_STRUCT )"
+ fi
+ if [ $usevserver = yes ]
+ then
+ # Test to make sure mysqld is running, if it is not sqldump will not work
+ $VSERVER $vsname exec su $user -c "$MYSQLADMIN $defaultsfile ping 2>&1 >/dev/null"
+ if [ $? -ne 0 ]; then
+ fatal "mysqld doesn't appear to be running!"
+ fi
+ if [ "$compress" == "yes" ]; then
+ execstr="$VSERVER $vsname exec $DUMP | $GZIP --rsyncable > '$vroot$dumpdir/${db}.sql.gz'"
+ else
+ execstr="$VSERVER $vsname exec $DUMP -r '$vroot$dumpdir/${db}.sql'"
+ fi
+ else
+ # Test to make sure mysqld is running, if it is not sqldump will not work
+ su $user -c "$MYSQLADMIN $defaultsfile ping 2>&1 >/dev/null"
+ if [ $? -ne 0 ]; then
+ fatal "mysqld doesn't appear to be running!"
+ fi
+ if [ "$compress" == "yes" ]; then
+ execstr="$DUMP | $GZIP --rsyncable > '$dumpdir/${db}.sql.gz'"
+ else
+ execstr="$DUMP -r '$dumpdir/${db}.sql'"
+ fi
+ fi
+ debug "su $user -c \"$execstr\""
+ if [ ! $test ]
+ then
+ output=`su $user -c "set -o pipefail ; $execstr" 2>&1`
+ code=$?
+ if [ "$code" == "0" ]
+ then
+ debug $output
+ info "Successfully finished dump of mysql database $db"
+ else
+ warning $output
+ warning "Failed to dump mysql databases $db"
+ fi
+ fi
+ done
fi
# clean up tmp config file
if [ "$dbusername" != "" -a "$dbpassword" != "" ]
then
- ## clean up tmp config file
- debug "rm $workcnf"
- rm $workcnf
- if [ -f "$tmpcnf" ]
- then
- debug "mv $tmpcnf $workcnf"
- mv $tmpcnf $workcnf
- fi
+ ## clean up tmp config file
+ debug "rm $workcnf"
+ rm $workcnf
+ if [ -f "$tmpcnf" ]
+ then
+ debug "mv $tmpcnf $workcnf"
+ mv $tmpcnf $workcnf
+ fi
fi
return 0
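
The "set -o pipefail" now wrapped around the piped dump commands is what lets a failed mysqldump surface in the exit status instead of being hidden behind gzip, which otherwise exits 0 after compressing a truncated stream. A minimal illustration of the difference, assuming a bash shell (not taken from the handler itself):

   bash -c 'false | gzip > /dev/null'                  ; echo $?   # 0: only gzip's status is visible
   bash -c 'set -o pipefail; false | gzip > /dev/null' ; echo $?   # 1: the failing producer is reported
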
diff --git a/handlers/pgsql.helper.in b/handlers/pgsql.helper.in
index 8024616..ff1cfd4 100644
--- a/handlers/pgsql.helper.in
+++ b/handlers/pgsql.helper.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS pgsql:postgresql_database_backup"
@@ -13,20 +14,20 @@ do_pgsql_databases() {
while [ -z "$REPLY" ]; do
formBegin "$pgsql_title: databases"
formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
- formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
+ formItem "Database:"
formDisplay
[ $? = 0 ] || return 1
pgsql_databases="databases = "
for i in $REPLY; do
- [ -n "$i" ] && pgsql_databases="$pgsql_databases $i"
+ [ -n "$i" ] && pgsql_databases="$pgsql_databases $i"
done
done
}
@@ -86,14 +87,14 @@ EOF
$pgsql_backupdir
# databases = < all | db1 db2 db3 > (default = all)
-# which databases to backup. should either be the word 'all' or a
+# which databases to backup. should either be the word 'all' or a
# space separated list of database names.
# Note: when using 'all', pg_dumpall is used instead of pg_dump, which means
# that cluster-wide data (such as users and groups) are saved.
$pgsql_databases
# compress = < yes | no > (default = yes)
-# if yes, compress the pg_dump/pg_dumpall output.
+# if yes, compress the pg_dump/pg_dumpall output.
$pgsql_compress
### You can also set the following variables in backupninja.conf:
diff --git a/handlers/pgsql.in b/handlers/pgsql.in
index f334bf2..77a73fe 100644
--- a/handlers/pgsql.in
+++ b/handlers/pgsql.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# PostgreSQL handler script for backupninja
#
@@ -36,18 +37,18 @@ if [ $usevserver = yes ]; then
debug "Examining vserver '$vsname'."
if [ "$databases" == "all" ]; then
[ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \
- fatal "Can't find $PGSQLDUMPALL in vserver $vsname."
+ fatal "Can't find $PGSQLDUMPALL in vserver $vsname."
else
[ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \
- fatal "Can't find $PGSQLDUMP in vserver $vsname."
+ fatal "Can't find $PGSQLDUMP in vserver $vsname."
fi
else
if [ "$databases" == "all" ]; then
[ -x "`which $PGSQLDUMPALL`" ] || \
- fatal "Can't find $PGSQLDUMPALL."
+ fatal "Can't find $PGSQLDUMPALL."
else
[ -x "`which $PGSQLDUMP`" ] || \
- fatal "Can't find $PGSQLDUMP."
+ fatal "Can't find $PGSQLDUMP."
fi
fi
@@ -64,7 +65,7 @@ else
pguid=`getent passwd $PGSQLUSER | @AWK@ -F: '{print $3}'`
fi
[ -n "$pguid" ] || \
- fatal "No user called $PGSQLUSER`[ $usevserver = no ] || echo \" on vserver $vsname\"`."
+ fatal "No user called $PGSQLUSER`[ $usevserver = no ] || echo \" on vserver $vsname\"`."
debug "chown $pguid $vroot$backupdir"
chown $pguid $vroot$backupdir
debug "chmod 700 $vroot$backupdir"
@@ -72,61 +73,61 @@ chmod 700 $vroot$backupdir
# if $databases = all, use pg_dumpall
if [ "$databases" == "all" ]; then
- if [ $usevserver = yes ]; then
- if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL | $GZIP > $backupdir/${vsname}.sql.gz\""
- else
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL > $backupdir/${vsname}.sql\""
- fi
- else
- if [ "$compress" == "yes" ]; then
- execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL | $GZIP > $backupdir/${localhost}-all.sql.gz\""
- else
- execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL > $backupdir/${localhost}-all.sql\""
- fi
- fi
- debug "$execstr"
- if [ ! $test ]; then
- output=`eval $execstr 2>&1`
- code=$?
- if [ "$code" == "0" ]; then
- debug $output
- info "Successfully finished dump of pgsql cluster"
- else
- warning $output
- warning "Failed to dump pgsql cluster"
- fi
- fi
-
+ if [ $usevserver = yes ]; then
+ if [ "$compress" == "yes" ]; then
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP --rsyncable > '$backupdir/${vsname}.sql.gz'\""
+ else
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${vsname}.sql'\""
+ fi
+ else
+ if [ "$compress" == "yes" ]; then
+ execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP --rsyncable > '$backupdir/${localhost}-all.sql.gz'\""
+ else
+ execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${localhost}-all.sql'\""
+ fi
+ fi
+ debug "$execstr"
+ if [ ! $test ]; then
+ output=`eval $execstr 2>&1`
+ code=$?
+ if [ "$code" == "0" ]; then
+ debug $output
+ info "Successfully finished dump of pgsql cluster"
+ else
+ warning $output
+ warning "Failed to dump pgsql cluster"
+ fi
+ fi
+
# else use pg_dump on each specified database
else
- for db in $databases; do
- if [ $usevserver = yes ]; then
- if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db | $GZIP > $backupdir/${db}.sql.gz\""
- else
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db | > $backupdir/${db}.sql\""
- fi
- else
- if [ "$compress" == "yes" ]; then
- execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db | $GZIP > $backupdir/${db}.sql.gz\""
- else
- execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db > $backupdir/${db}.sql\""
- fi
- fi
- debug "$execstr"
- if [ ! $test ]; then
- output=`eval $execstr 2>&1`
- code=$?
- if [ "$code" == "0" ]; then
- debug $output
- info "Successfully finished dump of pgsql database ${db}"
- else
- warning $output
- warning "Failed to dump pgsql database ${db}"
- fi
- fi
- done
+ for db in $databases; do
+ if [ $usevserver = yes ]; then
+ if [ "$compress" == "yes" ]; then
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP --rsyncable > '$backupdir/${db}.sql.gz'\""
+ else
+            execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db > '$backupdir/${db}.sql'\""
+ fi
+ else
+ if [ "$compress" == "yes" ]; then
+ execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP --rsyncable > '$backupdir/${db}.sql.gz'\""
+ else
+ execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db > '$backupdir/${db}.sql'\""
+ fi
+ fi
+ debug "$execstr"
+ if [ ! $test ]; then
+ output=`eval $execstr 2>&1`
+ code=$?
+ if [ "$code" == "0" ]; then
+ debug $output
+ info "Successfully finished dump of pgsql database ${db}"
+ else
+ warning $output
+ warning "Failed to dump pgsql database ${db}"
+ fi
+ fi
+ done
fi
return 0
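
The compressed branches now call "$GZIP --rsyncable", which keeps successive dumps block-aligned so rsync-style transfers stay cheap. The flag is an assumption about the gzip build (Debian's patched gzip accepts it; other builds may reject it), so a defensive variant could probe for it first, roughly like this (sketch only; user and paths are examples):

   # fall back to plain gzip where --rsyncable is unsupported
   if echo | gzip --rsyncable > /dev/null 2>&1; then
      gzip_opts="--rsyncable"
   else
      gzip_opts=""
   fi
   su - postgres -c "pg_dumpall" | gzip $gzip_opts > /var/backups/postgres/all.sql.gz
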
diff --git a/handlers/rdiff.helper.in b/handlers/rdiff.helper.in
index 1055280..83f2fb5 100644
--- a/handlers/rdiff.helper.in
+++ b/handlers/rdiff.helper.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS rdiff:incremental_remote_filesystem_backup"
@@ -18,9 +19,9 @@ do_rdiff_host_includes() {
for ((i=0; i < ${#rdiff_includes[@]} ; i++)); do
formItem include ${rdiff_includes[$i]}
done
- formItem include
- formItem include
- formItem include
+ formItem include
+ formItem include
+ formItem include
formDisplay
[ $? = 0 ] || return
unset rdiff_includes
@@ -41,7 +42,7 @@ do_rdiff_vserver() {
while [ -z "$REPLY" ]; do
formBegin "$rdiff_title - vsincludes (backup these directories from every vserver)"
[ -z "$rdiff_vsincludes" ] && rdiff_vsincludes="$rdiff_default_includes"
-
+
for i in $rdiff_vsincludes; do
formItem include "$i"
done
@@ -52,19 +53,19 @@ do_rdiff_vserver() {
[ $? = 0 ] || return 1
rdiff_vsincludes=($REPLY)
done
-
+
set +o noglob
}
do_rdiff_excludes() {
set -o noglob
- formBegin "$rdiff_title: excludes"
+ formBegin "$rdiff_title: excludes"
for ((i=0; i < ${#rdiff_excludes[@]} ; i++))
do
formItem exclude ${rdiff_excludes[$i]}
done
- formItem exclude
- formItem exclude
+ formItem exclude
+ formItem exclude
formDisplay
[ $? = 0 ] || return
unset rdiff_excludes
@@ -102,10 +103,10 @@ do_rdiff_src() {
do_rdiff_dest() {
declare -a tmp_array
-
+
set -o noglob
REPLY=
- while [ -z "$REPLY" -o -z "$rdiff_directory" -o -z "$rdiff_host" -o -z "$rdiff_user" ]
+ while [ -z "$REPLY" -o -z "$rdiff_directory" -o -z "$rdiff_host" -o -z "$rdiff_user" ]
do
formBegin "$rdiff_title - destination: last three items are required"
formItem "keep" "$rdiff_keep"
@@ -119,11 +120,11 @@ do_rdiff_dest() {
rdiff_keep=${tmp_array[0]}
rdiff_directory=${tmp_array[1]}
rdiff_host=${tmp_array[2]}
- rdiff_user=${tmp_array[3]}
+ rdiff_user=${tmp_array[3]}
rdiff_type=${tmp_array[4]}
done
set +o noglob
-
+
_dest_done="(DONE)"
setDefault conn
}
@@ -155,7 +156,7 @@ do_rdiff_ssh_con() {
echo "Done. hit return to continue"
read
fi
-
+
ssh -o PreferredAuthentications=publickey $rdiff_host -l $rdiff_user "exit" 2> /dev/null
if [ $? -ne 0 ]; then
echo "Copying root's public ssh key to authorized_keys of $rdiff_user@$rdiff_host. When prompted, specify the password for user $rdiff_user@$rdiff_host."
@@ -171,7 +172,7 @@ do_rdiff_ssh_con() {
1 ) msgBox "$rdiff_title: error" "Connected successfully to $rdiff_user@$rdiff_host, but unable to write. Check ownership and modes of ~$rdiff_user on $rdiff_host." ;;
255 ) msgBox "$rdiff_title: error" "Failed to connect to $rdiff_user@$rdiff_host. Check hostname, username, and password. Also, make sure sshd is running on the destination host." ;;
* ) msgBox "$rdiff_title: error" "Unexpected error." ;;
- esac
+ esac
return
else
echo "Done. hit return to continue"
@@ -195,20 +196,20 @@ do_rdiff_ssh_con() {
else
booleanBox "Remote directory does not exist" "The destination backup directory does not exist, do you want me to create it for you?"
if [ $? = 0 ]; then
- ssh $rdiff_user@$rdiff_host "mkdir -p ${rdiff_directory}"
+ ssh $rdiff_user@$rdiff_host "mkdir -p ${rdiff_directory}"
result=$?
case $result in
0) msgBox "$rdiff_title: success" "Creation of the remote destination directory was a success!";;
- 1) msgBox "$rdiff_title: error" "Connected successfully to $rdiff_user@$rdiff_host, but was unable to create the destination directory, check the directory permissions."
+ 1) msgBox "$rdiff_title: error" "Connected successfully to $rdiff_user@$rdiff_host, but was unable to create the destination directory, check the directory permissions."
remote_status=failed;;
- 255) msgBox "$rdiff_title: error" "Failed to connect to $rdiff_user@$rdiff_host. Check hostname, username, and password. Also, make sure sshd is running on the destination host."
+ 255) msgBox "$rdiff_title: error" "Failed to connect to $rdiff_user@$rdiff_host. Check hostname, username, and password. Also, make sure sshd is running on the destination host."
remote_status=failed;;
- *) msgBox "$rdiff_title: error" "Unexpected error."
+ *) msgBox "$rdiff_title: error" "Unexpected error."
remote_status=failed;;
esac
fi
fi
-
+
if [ "$remote_status" = "ok" ]; then
do_rdiff_con
fi
@@ -231,12 +232,12 @@ do_rdiff_con() {
echo "Hit return to continue."
read
case $result in
- 0) msgBox "$rdiff_title: success" "Installation of rdiff-backup was a success!"
+ 0) msgBox "$rdiff_title: success" "Installation of rdiff-backup was a success!"
do_rdiff_con;;
1) msgBox "$rdiff_title: error" "Connected successfully to $rdiff_user@$rdiff_host, but was unable to install the package for some reason.";;
255) msgBox "$rdiff_title: error" "Failed to connect to $rdiff_user@$rdiff_host. Check hostname, username, and password. Also, make sure sshd is running on the destination host.";;
*) msgBox "$rdiff_title: error" "Unexpected error.";;
- esac
+ esac
return
fi
else
@@ -256,9 +257,9 @@ do_rdiff_con() {
fi
fi
else
- echo "SUCCESS: Everything looks good!"
- echo "Hit return to continue."
- read
+ echo "SUCCESS: Everything looks good!"
+ echo "Hit return to continue."
+ read
fi
_con_done="(DONE)"
@@ -298,11 +299,11 @@ EOF
set -o noglob
for ((i=0; i < ${#rdiff_includes[@]} ; i++)); do
echo "include = ${rdiff_includes[$i]}" >> $next_filename
- done
+ done
set +o noglob
fi
- if [ "$host_or_vservers" == vservers -o "$host_or_vservers" == both ]; then
+ if [ "$host_or_vservers" == vservers -o "$host_or_vservers" == both ]; then
cat >> $next_filename <<EOF
#
# If vservers = yes in /etc/backupninja.conf then the following variables can
@@ -328,7 +329,7 @@ EOF
done
set +o noglob
fi
-
+
## excludes ##
set -o noglob
for ((i=0; i < ${#rdiff_excludes[@]} ; i++)); do
@@ -340,7 +341,7 @@ EOF
######################################################
## destination section
## (where the files are copied to)
-
+
[dest]
type = remote
directory = $rdiff_directory
@@ -382,7 +383,7 @@ rdiff_main_menu() {
}
rdiff_wizard() {
-
+
# Global variables
rdiff_title="rdiff-backup action wizard"
_src_done=
@@ -401,7 +402,7 @@ rdiff_wizard() {
rdiff_excludes=(/home/*/.gnupg /home/*/.local/share/Trash /home/*/.Trash /home/*/.thumbnails /home/*/.beagle /home/*/.aMule /home/*/gtk-gnutella-downloads)
rdiff_vsincludes=
set +o noglob
-
+
rdiff_main_menu
}
diff --git a/handlers/rdiff.in b/handlers/rdiff.in
index 0f93429..60386fa 100644
--- a/handlers/rdiff.in
+++ b/handlers/rdiff.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# rdiff-backup handler script for backupninja
# requires rdiff-backup
@@ -7,68 +8,74 @@
### FUNCTIONS ###
function test_connection() {
- # given a user and host,
- # tests the connection.
- # if user or host is missing, returns 0
- # (ie, assume it's a local connection).
- if [ $# -lt 2 ]; then
- debug "(local is assumed to be a good connection)"
- return 0
- fi
- local user=$1
- local host=$2
- debug "ssh $sshoptions -o PasswordAuthentication=no $host -l $user 'echo -n 1'"
- local ret=`ssh $sshoptions -o PasswordAuthentication=no $host -l $user 'echo -n host is alive'`
- if echo $ret | grep "host is alive"; then
- debug "Connected to $host as $user successfully"
- else
- fatal "Can't connect to $host as $user."
- fi
+ # given a user and host,
+ # tests the connection.
+ # if user or host is missing, returns 0
+ # (ie, assume it's a local connection).
+ if [ $# -lt 2 ]; then
+ debug "(local is assumed to be a good connection)"
+ return 0
+ fi
+ local user=$1
+ local host=$2
+ debug "ssh $sshoptions -o PasswordAuthentication=no $host -l $user 'echo -n 1'"
+ local ret=`ssh $sshoptions -o PasswordAuthentication=no $host -l $user 'echo -n host is alive'`
+ if echo $ret | grep "host is alive"; then
+ debug "Connected to $host as $user successfully"
+ else
+ fatal "Can't connect to $host as $user."
+ fi
}
function get_version() {
- # given no arguments, returns the local version.
- # given a user and host, returns the remote version.
- # if user or host is missing, returns the local version.
- if [ "$#" -lt 2 ]; then
- debug "$RDIFFBACKUP -V"
- echo `$RDIFFBACKUP -V`
- else
- local user=$1
- local host=$2
- debug "ssh $sshoptions $host -l $user '$RDIFFBACKUP -V'"
- echo `ssh $sshoptions $host -l $user "$RDIFFBACKUP -V | grep rdiff-backup"`
- fi
+ # given no arguments, returns the local version.
+ # given a user and host, returns the remote version.
+ # if user or host is missing, returns the local version.
+ local version
+ if [ "$#" -lt 2 ]; then
+ debug "$RDIFFBACKUP -V"
+ echo `$RDIFFBACKUP -V`
+ else
+ local user=$1
+ local host=$2
+ debug "ssh $sshoptions $host -l $user '$RDIFFBACKUP -V'"
+ version=`ssh $sshoptions $host -l $user "$RDIFFBACKUP -V"`
+ if [ $? = 127 ]; then
+ fatal "Unable to execute rdiff-backup on remote server. It probably isn't installed"
+ else
+ echo "$version" | grep rdiff-backup
+ fi
+ fi
}
function check_consistency() {
- local section=$1
- local type=$2
- local user=$3
- local host=$4
- if [ "$type" == "local" ]; then
- if [ "$user" != "" ]; then
- warning "User should not be specified for local $section."
- fi
- if [ "$host" != "" ]; then
- warning "Host should not be specified for local $section."
- fi
- fi
- if [ "$type" == "remote" ]; then
- if [ "$user" == "" ]; then
- fatal "User must be specified for remote $section."
- fi
- if [ "host" == "" ]; then
- fatal "Host must be specifed for remote $section."
- fi
- fi
+ local section=$1
+ local type=$2
+ local user=$3
+ local host=$4
+ if [ "$type" == "local" ]; then
+ if [ "$user" != "" ]; then
+ warning "User should not be specified for local $section."
+ fi
+ if [ "$host" != "" ]; then
+ warning "Host should not be specified for local $section."
+ fi
+ fi
+ if [ "$type" == "remote" ]; then
+ if [ "$user" == "" ]; then
+ fatal "User must be specified for remote $section."
+ fi
+ if [ "$host" == "" ]; then
+         fatal "Host must be specified for remote $section."
+ fi
+ fi
}
function check_cstream() {
- local cstream=$1
- if [ ! -x $cstream ]; then
- fatal "Can't find your cstream binary (trying: $cstream). If you use bwlimit you must have cstream installed."
- fi
+ local cstream=$1
+ if [ ! -x $cstream ]; then
+ fatal "Can't find your cstream binary (trying: $cstream). If you use bwlimit you must have cstream installed."
+ fi
}
### GET CONFIG ###
@@ -102,7 +109,7 @@ getconf sshoptions
check_consistency "destination" "$type" "$user" "$host"
if [ -n "$sshoptions" ] && echo $options | grep -qv "remote-schema"; then
- options="$options --remote-schema 'ssh -C $sshoptions %s rdiff-backup --server'"
+ options="$options --remote-schema 'ssh -C $sshoptions %s rdiff-backup --server'"
fi
### CHECK CONFIG ###
@@ -114,7 +121,7 @@ if [ $vservers_are_available = yes ]; then
vsnames="$found_vservers"
else
if ! vservers_exist "$vsnames" ; then
- fatal "At least one of the vservers listed in vsnames ($vsnames) does not exist."
+ fatal "At least one of the vservers listed in vsnames ($vsnames) does not exist."
fi
fi
if [ -n "$vsinclude" ]; then
@@ -128,40 +135,40 @@ fi
# check the connection at the source and destination
[ -n "$test" ] || test=0
if [ "$testconnect" = "yes" ] || [ "${test}" -eq 1 ]; then
- test_connection $sourceuser $sourcehost
- test_connection $destuser $desthost
+ test_connection $sourceuser $sourcehost
+ test_connection $destuser $desthost
fi
if [ "$ignore_version" != "yes" ]; then
- # see that rdiff-backup has the same version at the source and destination
- sourceversion=`get_version $sourceuser $sourcehost`
- destversion=`get_version $destuser $desthost`
- if [ "$sourceversion" != "$destversion" ]; then
- fatal "rdiff-backup does not have the same version at the source and at the destination."
- fi
+ # see that rdiff-backup has the same version at the source and destination
+ sourceversion=`get_version $sourceuser $sourcehost`
+ destversion=`get_version $destuser $desthost`
+ if [ "$sourceversion" != "$destversion" ]; then
+ fatal "rdiff-backup does not have the same version at the source and at the destination."
+ fi
fi
# source specific checks
-case $sourcetype in
- remote ) execstr_sourcepart="$sourceuser@$sourcehost::/" ;;
- local ) execstr_sourcepart="/" ;;
- * ) fatal "sourcetype '$sourcetype' is neither local nor remote" ;;
+case $sourcetype in
+ remote ) execstr_sourcepart="$sourceuser@$sourcehost::/" ;;
+ local ) execstr_sourcepart="/" ;;
+ * ) fatal "sourcetype '$sourcetype' is neither local nor remote" ;;
esac
# destination specific checks
[ "$destdir" != "" ] || fatal "Destination directory not set"
-case $desttype in
- remote ) execstr_destpart="$destuser@$desthost::$destdir/$label" ;;
- local ) execstr_destpart="$destdir/$label" ;;
- * ) fatal "desttype '$desttype' is neither local nor remote" ;;
+case $desttype in
+ remote ) execstr_destpart="$destuser@$desthost::$destdir/$label" ;;
+ local ) execstr_destpart="$destdir/$label" ;;
+ * ) fatal "desttype '$desttype' is neither local nor remote" ;;
esac
-
+
### REMOVE OLD BACKUPS ###
if [ "$keep" != yes ]; then
if [ "`echo $keep | tr -d 0-9`" == "" ]; then
- # add D if no other date unit is specified
+ # add D if no other date unit is specified
keep="${keep}D"
fi
@@ -175,27 +182,27 @@ if [ "$keep" != yes ]; then
if [ $test = 0 ]; then
output="`su -c "$removestr" 2>&1`"
if [ $? = 0 ]; then
- debug $output
- info "Removing backups older than $keep days succeeded."
+ debug $output
+ info "Removing backups older than $keep days succeeded."
else
- warning $output
- warning "Failed removing backups older than $keep."
+ warning $output
+ warning "Failed removing backups older than $keep."
fi
fi
fi
-# Add cstream
+# Add cstream
if [ ! -z $bwlimit ]; then
- check_cstream $CSTREAM;
- if [ "$desttype" = "remote" ]; then
- RDIFFBACKUP="$RDIFFBACKUP --remote-schema 'cstream -t $bwlimit | ssh %s \''rdiff-backup --server\'''"
- elif [ "$sourcetype" = "remote" ]; then
- RDIFFBACKUP="$RDIFFBACKUP --remote-schema 'ssh %s \''rdiff-backup --server\'' | cstream -t $bwlimit'"
- else
- fatal "You specified a bandwidth limit but neither your source nor destination types are remote."
- fi
+ check_cstream $CSTREAM;
+ if [ "$desttype" = "remote" ]; then
+ RDIFFBACKUP="$RDIFFBACKUP --remote-schema 'cstream -t $bwlimit | ssh %s \''rdiff-backup --server\'''"
+ elif [ "$sourcetype" = "remote" ]; then
+ RDIFFBACKUP="$RDIFFBACKUP --remote-schema 'ssh %s \''rdiff-backup --server\'' | cstream -t $bwlimit'"
+ else
+ fatal "You specified a bandwidth limit but neither your source nor destination types are remote."
+ fi
fi
### EXECUTE ###
@@ -208,29 +215,38 @@ symlinks_warning="Maybe you have mixed symlinks and '*' in this statement, which
# TODO: order the includes and excludes
# excludes
+SAVEIFS=$IFS
+IFS=$(echo -en "\n\b")
for i in $exclude; do
str="${i//__star__/*}"
execstr="${execstr}--exclude '$str' "
done
-# includes
+IFS=$SAVEIFS
+# includes
+SAVEIFS=$IFS
+IFS=$(echo -en "\n\b")
for i in $include; do
[ "$i" != "/" ] || fatal "Sorry, you cannot use 'include = /'"
str="${i//__star__/*}"
execstr="${execstr}--include '$str' "
done
+IFS=$SAVEIFS
# vsinclude
if [ $usevserver = yes ]; then
for vserver in $vsnames; do
+ SAVEIFS=$IFS
+ IFS=$(echo -en "\n\b")
for vi in $vsinclude; do
- str="${vi//__star__/*}"
- str="$VROOTDIR/$vserver$str"
+ str="${vi//__star__/*}"
+ str="$VROOTDIR/$vserver$str"
if [ -n "$str" ]; then
- execstr="${execstr}--include '$str' "
+ execstr="${execstr}--include '$str' "
else
warning "vsinclude statement '${vi//__star__/*}' will be ignored for VServer $vserver. $symlinks_warning"
fi
done
+ IFS=$SAVEIFS
done
fi
@@ -238,20 +254,20 @@ set +o noglob
# exclude everything else
[ "$include" != "" -o "$vsinclude" != "" ] && execstr="${execstr}--exclude '/*' "
-
+
# include client-part and server-part
execstr="${execstr}$execstr_sourcepart $execstr_destpart"
debug "$execstr"
if [ $test = 0 ]; then
- output=`nice -n $nicelevel su -c "$execstr" 2>&1`
- if [ $? = 0 ]; then
- debug $output
- info "Successfully finished backing up source $label"
- else
- warning $output
- warning "Failed backup up source $label"
- fi
-fi
+ output=`nice -n $nicelevel su -c "$execstr" 2>&1`
+ if [ $? = 0 ]; then
+ debug $output
+ info "Successfully finished backing up source $label"
+ else
+ warning $output
+      warning "Failed backing up source $label"
+ fi
+fi
return 0
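
Saving and narrowing IFS around the include, exclude and vsinclude loops is what keeps configured paths containing spaces from being split into several --include/--exclude arguments. A standalone sketch of the effect (not taken from the handler):

   include='/home
/var/my files'
   SAVEIFS=$IFS
   IFS=$(echo -en "\n\b")
   for i in $include; do
      echo "--include '$i'"   # '/var/my files' survives as a single argument
   done
   IFS=$SAVEIFS
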
diff --git a/handlers/rsync.in b/handlers/rsync.in
index de746d5..8f638d7 100644
--- a/handlers/rsync.in
+++ b/handlers/rsync.in
@@ -1,13 +1,29 @@
+# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
-# backupninja handler to do incremental backups using
-# rsync and hardlinks, based on
+# backupninja handler for incremental backups using rsync and hardlinks
+# feedback: rhatto at riseup.net
+#
+# rsync handler is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or any later version.
+#
+# rsync handler is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+# Place - Suite 330, Boston, MA 02111-1307, USA
+#
+# Inspiration
+# -----------
#
# - http://www.mikerubel.org/computers/rsync_snapshots/
# - rsnap handler by paulv at bikkel.org
# - maildir handler from backupninja
#
-# feedback: rhatto at riseup.net | gpl
-#
# Config file options
# -------------------
#
@@ -292,7 +308,7 @@ function rotate_short {
local keep="$2"
local metadata="`dirname $folder`/metadata"
- if [[ "$keep" < 4 ]]; then
+ if [[ "$keep" -lt 4 ]]; then
error "Rotate: minimum of 4 rotations"
exit 1
fi
@@ -327,7 +343,7 @@ function rotate_short_remote {
local metadata="`dirname $folder`/metadata"
local keep="$2"
- if [[ "$2" < 4 ]]; then
+ if [[ "$2" -lt 4 ]]; then
error "Rotate: minimum of 4 rotations"
exit 1
fi
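
The switch from '<' to '-lt' in the rotation guards matters because, inside [[ ]], '<' is a lexical string comparison: a keep value of 10 sorts before "4" and would wrongly trip the minimum-rotation check. A quick demonstration under bash:

   keep=10
   [[ "$keep" < 4 ]]   && echo "lexical: '10' sorts before '4'"   # fires
   [[ "$keep" -lt 4 ]] && echo "numeric: 10 is less than 4"       # does not fire
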
diff --git a/handlers/sh.in b/handlers/sh.in
index f9f1926..b070f3b 100644
--- a/handlers/sh.in
+++ b/handlers/sh.in
@@ -1,7 +1,8 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# shell script handler for backupninja
# runs the file /etc/backup.d/scriptname.sh
-#
+#
[ $test ] || ( . $1 )
diff --git a/handlers/svn.in b/handlers/svn.in
index d19b0b8..5e5531a 100644
--- a/handlers/svn.in
+++ b/handlers/svn.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# this handler will back up subversion repositories.
#
@@ -7,7 +8,7 @@ getconf src /var/lib/svn
getconf dest /var/backups/svn
getconf tmp /var/backups/svn.tmp
getconf HOTBACKUP "/usr/bin/svnadmin hotcopy"
-getconf vsname
+getconf vsname
error=0
@@ -35,43 +36,43 @@ fi
cd $vroot$src
for repo in `find . -name svnserve.conf`
do
- repo=`dirname $repo`
- repo=`dirname $repo`
+ repo=`dirname $repo`
+ repo=`dirname $repo`
- ret=`mkdir -p $vroot$tmp/$repo 2>&1`
- code=$?
- if [ "$ret" ]; then
- debug "$ret"
- fi
- if [ $code != 0 ]; then
- error "command failed mkdir -p $vroot$tmp/$repo"
- fi
+ ret=`mkdir -p $vroot$tmp/$repo 2>&1`
+ code=$?
+ if [ "$ret" ]; then
+ debug "$ret"
+ fi
+ if [ $code != 0 ]; then
+ error "command failed mkdir -p $vroot$tmp/$repo"
+ fi
- if [ $usevserver = yes ]
- then
- ret=`$VSERVER $vsname exec $HOTBACKUP $src/$repo $tmp/$repo 2>&1`
- else
- ret=`$HOTBACKUP $src/$repo $tmp/$repo 2>&1`
- fi
- code=$?
- if [ "$ret" ]; then
- debug "$ret"
- fi
- if [ $code != 0 ]; then
- error "command failed -- $HOTBACKUP $vroot$src/$repo $vroot$tmp/$repo"
- error=1
- fi
+ if [ $usevserver = yes ]
+ then
+ ret=`$VSERVER $vsname exec $HOTBACKUP $src/$repo $tmp/$repo 2>&1`
+ else
+ ret=`$HOTBACKUP $src/$repo $tmp/$repo 2>&1`
+ fi
+ code=$?
+ if [ "$ret" ]; then
+ debug "$ret"
+ fi
+ if [ $code != 0 ]; then
+ error "command failed -- $HOTBACKUP $vroot$src/$repo $vroot$tmp/$repo"
+ error=1
+ fi
done
if [ $error -eq 1 ]; then
- echo "Error: because of earlier errors, we are leaving svn backups in $vroot$tmp instead of $vroot$dest"
+ echo "Error: because of earlier errors, we are leaving svn backups in $vroot$tmp instead of $vroot$dest"
else
- if [ -d $vroot$dest -a -d $vroot$tmp ]; then
- rm -rf $vroot$dest
- fi
- if [ -d $vroot$tmp ]; then
- mv $vroot$tmp $vroot$dest
- fi
+ if [ -d $vroot$dest -a -d $vroot$tmp ]; then
+ rm -rf $vroot$dest
+ fi
+ if [ -d $vroot$tmp ]; then
+ mv $vroot$tmp $vroot$dest
+ fi
fi
exit 0
diff --git a/handlers/sys.helper.in b/handlers/sys.helper.in
index d3d99a5..8a2fb07 100644
--- a/handlers/sys.helper.in
+++ b/handlers/sys.helper.in
@@ -1,26 +1,33 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS sys:general_hardware_and_system_info"
sys_wizard() {
require_packages hwinfo debconf-utils
checkBox "new sys action" "check options" \
- "packages" "list of all installed packages." on \
- "partitions" "the partition table of all disks." on \
- "sfdisk" "use sfdisk to get partition information." on \
- "hardware" "detailed hardware information" on
- [ $? = 1 ] && return;
+ "packages" "list of all installed packages." on \
+ "partitions" "the partition table of all disks." on \
+ "sfdisk" "use sfdisk to get partition information." on \
+ "hardware" "detailed hardware information" on \
+ "luksheaders" "Luks headers of all Luks partitions." off \
+ "lvm" "LVM metadata for all volume groups." off
+ [ $? = 1 ] && return;
result="$REPLY"
packages="packages = no"
partitions="partitions = no"
sfdisk="dosfdisk = no"
hardware="hardware = no"
+ luksheaders="luksheaders = no"
+ lvm="lvm = no"
for opt in $result; do
case $opt in
- '"packages"') packages="packages = yes";;
- '"partitions"') partitions="partitions = yes";;
- '"sfdisk"') sfdisk="dosfdisk = yes";;
- '"hardware"') hardware="hardware = yes";;
+ '"packages"') packages="packages = yes";;
+ '"partitions"') partitions="partitions = yes";;
+ '"sfdisk"') sfdisk="dosfdisk = yes";;
+ '"hardware"') hardware="hardware = yes";;
+ '"luksheaders"') luksheaders="luksheaders = yes";;
+ '"lvm"') lvm="lvm = yes";;
esac
done
get_next_filename $configdirectory/10.sys
@@ -29,10 +36,14 @@ $packages
$partitions
$sfdisk
$hardware
+$luksheaders
+$lvm
+
# packagesfile = /var/backups/dpkg-selections.txt
# selectionsfile = /var/backups/debconfsel.txt
# partitionsfile = /var/backups/partitions.__star__.txt
# hardwarefile = /var/backups/hardware.txt
+# luksheadersfile = /var/backups/luksheader.__star__.bin
# If vservers = yes in /etc/backupninja.conf then the following variables can
# be used:
diff --git a/handlers/sys.in b/handlers/sys.in
index f293840..69751ed 100755
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -1,7 +1,8 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# this handler will save various reports of vital system information.
-# by default, all the reports are enabled and are saved in /var/backups.
+# by default, all the reports are saved in /var/backups.
#
# (1) a capture of the debconf package selection states. This file
# can be used to restore the answers to debconf questions for
@@ -11,19 +12,27 @@
# (2) a list of all the packages installed and removed.
# this file can be used to restore the state of installed packages
# by running "dpkg --set-selections < dpkg-selections.txt and
-# then run "apt-get -u dselect-upgrade". If you have the
+# then run "apt-get -u dselect-upgrade". If you have the
# debconf-set-selections file from (1), you should restore those first.
-#
-# (3) the partition table of all disks.
+#
+# (3) the partition table of all disks.
# this partition table can be used to format another disk of
-# the same size. this can be handy if using software raid and
+# the same size. this can be handy if using software raid and
# you have a disk go bad. just replace the disk and partition it
# by running "sfdisk /dev/sdb < partitions.sdb.txt"
# (MAKE SURE YOU PARTITION THE CORRECT DISK!!!)
#
-# (4) hardware information.
+# (4) hardware information.
# write to a text file the important things which hwinfo can gleen.
#
+# (5) the Luks header of every Luks block device, if option luksheaders
+# is enabled.
+# in case you (have to) scramble such a Luks header (for some time),
+# and restore it later by running "dd if=luksheader.sda2.bin of=/dev/sda2"
+# (MAKE SURE YOU PASS THE CORRECT DEVICE AS of= !!!)
+#
+# (6) LVM metadata for every detected volume group, if "lvm = yes"
+#
if [ -f /etc/debian_version ]
then
@@ -47,7 +56,7 @@ getconf dohwinfo yes
if [ ! -d $parentdir ]; then
mkdir -p $parentdir
fi
-
+
if [ $os = "debian" ]
then
getconf packagesfile $parentdir/dpkg-selections.txt
@@ -57,9 +66,9 @@ then
getconf debconfgetselections `which debconf-get-selections`
elif [ $os = "redhat" ]
then
- getconf packagesfile $parentdir/rpmpackages.txt
- getconf packagemgr `which rpm`
- getconf packagemgroptions ' -qa '
+ getconf packagesfile $parentdir/rpmpackages.txt
+ getconf packagemgr `which rpm`
+ getconf packagemgroptions ' -qa '
getconf SYSREPORT `which sysreport`
getconf sysreport_options ' -norpm '
@@ -82,6 +91,15 @@ getconf HWINFO `which hwinfo`
getconf sfdisk_options ""
getconf hwinfo_options ""
+getconf CRYPTSETUP `which cryptsetup`
+getconf DD `which dd`
+getconf luksheaders no
+getconf luksheadersfile $parentdir/luksheader.__star__.bin
+
+getconf VGS `which vgs`
+getconf VGCFGBACKUP `which vgcfgbackup`
+getconf lvm no
+
getconf vsnames all
# If vservers are configured, check that the ones listed in $vsnames are running.
@@ -97,6 +115,30 @@ if [ $vservers_are_available = yes ]; then
usevserver=yes
fi
+## SANITY CHECKS #########################
+
+if [ "$luksheaders" == "yes" ]; then
+ if [ ! -x "$DD" ]; then
+ warning "can't find dd, skipping backup of Luks headers."
+ luksheaders="no"
+ fi
+ if [ ! -x "$CRYPTSETUP" ]; then
+ warning "can't find cryptsetup, skipping backup of Luks headers."
+ luksheaders="no"
+ fi
+fi
+
+if [ "$lvm" == "yes" ]; then
+ if [ ! -x "$VGS" ]; then
+ warning "can't find vgs, skipping backup of LVM metadata"
+ lvm="no"
+ fi
+ if [ ! -x "$VGCFGBACKUP" ]; then
+ warning "can't find vgcfgbackup, skipping backup of LVM metadata"
+ lvm="no"
+ fi
+fi
+
## PACKAGES ##############################
#
@@ -122,7 +164,7 @@ if [ "$packages" == "yes" ]; then
# don't expand * since it can be used in $packagemgroptions
set -o noglob
debug "$VSERVER $vserver exec $packagemgr $packagemgroptions > $VROOTDIR/$vserver$packagesfile"
- $VSERVER $vserver exec $packagemgr $packagemgroptions > $VROOTDIR/$vserver$packagesfile || fatal "can not save $packagemgr info to $packagesfile"
+ $VSERVER $vserver exec $packagemgr $packagemgroptions > $VROOTDIR/$vserver$packagesfile || fatal "can not save $packagemgr info to $packagesfile"
set +o noglob
fi
# is $debconfgetselections available inside $vserver ?
@@ -143,9 +185,9 @@ if [ "$packages" == "yes" ]; then
unset found
done
fi
-
+
# We want to perform this on the host as well
- if [ -z "$packagemgr" -o ! -x "$packagemgr" ]; then
+ if [ -z "$packagemgr" -o ! -x "$packagemgr" ]; then
warning "can't find ${packagemgr}, skipping installed packages report."
else
# don't expand * since it can be used in $packagemgroptions
@@ -194,7 +236,7 @@ catiffile () {
done
fi
echo $DASHES >> $sysreportfile
-}
+}
catifexec () {
if [ -x $1 ]; then
@@ -204,12 +246,12 @@ catifexec () {
$* >> $sysreportfile 2>&1 || info "executing of $1 failed"
fi
}
-
+
STATUS="Determining $os version:"
catiffile $osversion
-STATUS="Determinding your current hostname: "
+STATUS="Determinding your current hostname: "
catifexec "/bin/hostname"
STATUS="Getting the date:"
@@ -231,17 +273,17 @@ if [ $os = "redhat" ]; then
catiffile "/bin/ls /etc/rc.d/rc*.d/"
elif [ $os = "debian" ]; then
- for level in 0 1 2 3 4 5 6 S; do
- echo "Level: $level" >> $sysreportfile
- for f in /etc/rc${level}.d/*; do
- # Remove /etc/Knn or Snn from beginning
- ff=$(echo $f | @SED@ 's_/etc/rc..d/[KS][0-9][0-9]__')
- if [ $f != $ff ]; then
- echo $ff >> $sysreportfile
- fi
- done
- echo "" >> $sysreportfile
- done
+ for level in 0 1 2 3 4 5 6 S; do
+ echo "Level: $level" >> $sysreportfile
+ for f in /etc/rc${level}.d/*; do
+ # Remove /etc/Knn or Snn from beginning
+ ff=$(echo $f | @SED@ 's_/etc/rc..d/[KS][0-9][0-9]__')
+ if [ $f != $ff ]; then
+ echo $ff >> $sysreportfile
+ fi
+ done
+ echo "" >> $sysreportfile
+ done
fi
STATUS="Getting bootloader information:"
@@ -250,34 +292,34 @@ catifexec "/bin/ls" "-alR /boot"
# This covers sparc, alpha, and intel (respectively)
# updated for grub -mpg
if [ -f /etc/silo.conf ]; then
- STATUS="Collecting information about the boot process (silo):"
- catiffile "/etc/silo.conf"
+ STATUS="Collecting information about the boot process (silo):"
+ catiffile "/etc/silo.conf"
fi
if [ -f /etc/milo.conf ]; then
- STATUS="Collecting information about the boot process (milo):"
- catiffile "/etc/milo.conf"
+ STATUS="Collecting information about the boot process (milo):"
+ catiffile "/etc/milo.conf"
fi
if [ -f /etc/lilo.conf ]; then
- STATUS="Collecting information about the boot process (lilo):"
- catiffile "/etc/lilo.conf"
- catifexec "/sbin/lilo" "-q"
+ STATUS="Collecting information about the boot process (lilo):"
+ catiffile "/etc/lilo.conf"
+ catifexec "/sbin/lilo" "-q"
fi
if [ -d /boot/grub -a -f /boot/grub/grub.conf -a -f /boot/grub/device.map ]; then
- STATUS="Collecting information about the boot process (grub.conf):"
- catiffile "/boot/grub/grub.conf"
- STATUS="Collecting information about the boot process (grub.map):"
- catiffile "/boot/grub/device.map"
+ STATUS="Collecting information about the boot process (grub.conf):"
+ catiffile "/boot/grub/grub.conf"
+ STATUS="Collecting information about the boot process (grub.map):"
+ catiffile "/boot/grub/device.map"
fi
if [ -f /etc/cluster.conf -o -f /etc/cluster.xml ] ; then
- STATUS="Gathering information on cluster setup"
- # 2.1 AS
- if [ -f /etc/cluster.conf ] ; then
- catiffile "/etc/cluster.conf"
- fi
- # Taroon
- if [ -f /etc/cluster.xml ] ; then
- catiffile "/etc/cluster.xml"
- fi
+ STATUS="Gathering information on cluster setup"
+ # 2.1 AS
+ if [ -f /etc/cluster.conf ] ; then
+ catiffile "/etc/cluster.conf"
+ fi
+ # Taroon
+ if [ -f /etc/cluster.xml ] ; then
+ catiffile "/etc/cluster.xml"
+ fi
fi
STATUS="Gathering sysctl information (sysctl -a):"
@@ -308,10 +350,10 @@ STATUS="Getting kernel version:"
catifexec "/bin/uname" "-a"
STATUS="Checking module information:"
catifexec "/sbin/lsmod"
-for x in $(/sbin/lsmod | /bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
+for x in $(/sbin/lsmod | /bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
) ; do
- STATUS="Checking module information $x:"
- catifexec "/sbin/modinfo" "$x"
+ STATUS="Checking module information $x:"
+ catifexec "/sbin/modinfo" "$x"
done
STATUS="Gathering information about your filesystems:"
@@ -334,27 +376,27 @@ STATUS="Collecting information regarding kernel modules"
VER=`uname -r`
catiffile "/lib/modules/$VER/modules.dep"
if [ -f /etc/conf.modules ]; then
- STATUS="Collecting information regarding kernel modules (conf.modules)"
- catiffile "/etc/conf.modules"
+ STATUS="Collecting information regarding kernel modules (conf.modules)"
+ catiffile "/etc/conf.modules"
fi
if [ -f /etc/modules.conf ]; then
- STATUS="Collecting information regarding kernel modules (modules.conf)"
- catiffile "/etc/modules.conf"
+ STATUS="Collecting information regarding kernel modules (modules.conf)"
+ catiffile "/etc/modules.conf"
fi
if [ -f /etc/modprobe.conf ]; then
- STATUS="Collecting information regarding kernel modules (modeprobe.conf)"
- catiffile "/etc/modprobe.conf"
+   STATUS="Collecting information regarding kernel modules (modprobe.conf)"
+ catiffile "/etc/modprobe.conf"
fi
# dkms status
if [ -x /usr/sbin/dkms ] ; then
STATUS="Gathering current status of modules, versions and kernels (dkms):"
- catifexec "/usr/sbin/dkms" "status"
+ catifexec "/usr/sbin/dkms" "status"
fi
if [ -f /etc/sysconfig/isdncard ] ; then
- STATUS="Gathering information about ISDN:"
- catiffile "/etc/sysconfig/isdncard"
+ STATUS="Gathering information about ISDN:"
+ catiffile "/etc/sysconfig/isdncard"
fi
STATUS="Collecting information from the proc directory:"
@@ -437,18 +479,21 @@ if [ $os = "redhat" ]; then
elif [ $os = "debian" ]; then
catifexec "/sbin/vgdisplay" "-vv"
fi
-
+
+STATUS="Collecting device-mapper (dm) information:"
+catifexec '/sbin/dmsetup' 'info'
+
STATUS="Collecting SCSI Tape information (/etc/stinit.def)"
catiffile "/etc/stinit.def"
if [ -x /sbin/lsusb ] ; then
- STATUS="Collecting USB devices list (lsusb):"
- catifexec "/sbin/lsusb"
+ STATUS="Collecting USB devices list (lsusb):"
+ catifexec "/sbin/lsusb"
fi
if [ -x /usr/bin/lshal ] ; then
- STATUS="Collecting global devices list (lshal):"
- catifexec "/usr/bin/lshal"
+ STATUS="Collecting global devices list (lshal):"
+ catifexec "/usr/bin/lshal"
fi
@@ -467,24 +512,24 @@ fi
if [ "$partitions" == "yes" ]; then
if [ "$dosfdisk" == "yes" ]; then
- if [ ! -x "$SFDISK" ]; then
- warning "can't find sfdisk, skipping sfdisk report."
- partitions="no"
- fi
+ if [ ! -x "$SFDISK" ]; then
+ warning "can't find sfdisk, skipping sfdisk report."
+ partitions="no"
+ fi
fi
if [ "$dohwinfo" == "yes" ]; then
- if [ ! -x "$HWINFO" ]; then
- warning "can't find hwinfo, skipping partition report."
- partitions="no"
- fi
+ if [ ! -x "$HWINFO" ]; then
+ warning "can't find hwinfo, skipping partition report."
+ partitions="no"
+ fi
fi
fi
if [ "$hardware" == "yes" ]; then
- if [ ! -x "$HWINFO" ]; then
- warning "can't find hwinfo, skipping hardware report."
- hardware="no"
- fi
+ if [ ! -x "$HWINFO" ]; then
+ warning "can't find hwinfo, skipping hardware report."
+ hardware="no"
+ fi
fi
## HARDWARE #############################
@@ -492,48 +537,47 @@ fi
#
# here we use hwinfo to dump a table listing all the
# information we can find on the hardware of this machine
-#
+#
if [ "$hardware" == "yes" ]; then
if [ "dohwinfo" == "yes" ]; then
if [ -f $hardwarefile ]; then
- rm $hardwarefile
+ rm $hardwarefile
fi
touch $hardwarefile
echo -e "\n\n====================== summary ======================\n" >> $hardwarefile
debug "$HWINFO --short --cpu --network --disk --pci >> $hardwarefile"
$HWINFO --short --cpu --network --disk --pci >> $hardwarefile
for flag in cpu network bios pci; do
- echo -e "\n\n====================== $flag ======================\n" >> $hardwarefile
- $HWINFO --$flag >> $hardwarefile
+ echo -e "\n\n====================== $flag ======================\n" >> $hardwarefile
+ $HWINFO --$flag >> $hardwarefile
done
fi
fi
-
## PARTITIONS #############################
-# here we use sfdisk to dump a listing of all the partitions.
+# here we use sfdisk to dump a listing of all the partitions.
# these files can be used to directly partition a disk of the same size.
if [ "$partitions" == "yes" ]; then
if [ "$dosfdisk" == "yes" ]; then
devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
- if [ "$devices" == "" ]; then
- warning "No harddisks found"
- fi
- for dev in $devices; do
- debug "$SFDISK will try to backup partition tables for device $dev"
- [ -b $dev ] || continue
- label=${dev#/dev/}
- label=${label//\//-}
- outputfile=${partitionsfile//__star__/$label}
- debug "$SFDISK $sfdisk_options -d $dev > $outputfile 2>/dev/null"
- $SFDISK $sfdisk_options -d $dev > $outputfile 2>/dev/null
- if [ $? -ne 0 ]; then
- warning "The partition table for $dev could not be saved."
- fi
- done
+ if [ "$devices" == "" ]; then
+ warning "No harddisks found"
+ fi
+ for dev in $devices; do
+ debug "$SFDISK will try to backup partition tables for device $dev"
+ [ -b $dev ] || continue
+ label=${dev#/dev/}
+ label=${label//\//-}
+ outputfile=${partitionsfile//__star__/$label}
+ debug "$SFDISK $sfdisk_options -d $dev > $outputfile 2>/dev/null"
+ $SFDISK $sfdisk_options -d $dev > $outputfile 2>/dev/null
+ if [ $? -ne 0 ]; then
+ warning "The partition table for $dev could not be saved."
+ fi
+ done
fi
if [ "$dohwinfo" == "yes" ]; then
debug "Using $HWINFO to get all available disk information"
@@ -541,3 +585,99 @@ if [ "$partitions" == "yes" ]; then
$HWINFO --disk >> $hardwarefile
fi
fi
+
+if [ "$luksheaders" == "yes" ]; then
+ devices=`LC_ALL=C $SFDISK -l 2>/dev/null | grep "^Disk /dev" | @AWK@ '{print $2}' | cut -d: -f1`
+ [ -n "$devices" ] || warning "No block device found"
+ targetdevices=""
+ for dev in $devices; do
+ [ -b $dev ] || continue
+ debug "$CRYPTSETUP isLuks $dev"
+ $CRYPTSETUP isLuks $dev
+ [ $? -eq 0 ] && targetdevices="$targetdevices $dev"
+ done
+ for dev in $targetdevices; do
+ label=${dev#/dev/}
+ label=${label//\//-}
+ outputfile=${luksheadersfile//__star__/$label}
+ # the following sizes are expressed in terms of 512-byte sectors
+ debug "Let us find out the Luks header size for $dev"
+ debug "$CRYPTSETUP luksDump \"$dev\" | grep '^Payload offset:' | @AWK@ '{print $3}'"
+ headersize=`$CRYPTSETUP luksDump "$dev" | grep '^Payload offset:' | @AWK@ '{print $3}'`
+ if [ $? -ne 0 ]; then
+ warning "Could not compute the size of Luks header, skipping device $dev"
+ continue
+ elif [ -z "$headersize" -o -n "`echo \"$headersize\" | sed 's/[0-9]*//g'`" ]; then
+ warning "The computed size of Luks header is not an integer, skipping device $dev"
+ continue
+ fi
+ debug "Let us backup the Luks header of device $dev"
+ debug "$DD if=\"${dev}\" of=\"${outputfile}\" bs=512 count=\"${headersize}\""
+ output=`$DD if="${dev}" of="${outputfile}" bs=512 count="${headersize}" 2>&1`
+ exit_code=$?
+ if [ $exit_code -eq 0 ]; then
+ debug $output
+ info "The Luks header of $dev was saved to $outputfile."
+ else
+ debug $output
+ fatal "The Luks header of $dev could not be saved."
+ fi
+ done
+fi
+
+## LVM ####################################
+
+# returns 0 on success, 1 on error, 2 if not tried
+# outputs error message if error, reason if not tried
+function doLvmBackup () {
+ local lvmdir="$1"
+ if [ ! -d "$lvmdir" ]; then
+ if ! mkdir "$lvmdir"; then
+ echo "could not create $lvmdir"
+ return 2
+ else
+ info "successfully created $lvmdir"
+ fi
+ fi
+ if [ ! -w "$lvmdir" ]; then
+ echo "can not write to directory $lvmdir"
+ return 2
+ fi
+ debug "Let's try to gather the list of LVM volume groups"
+ debug "$VGS --options vg_name --noheadings | @SED@ 's/^[ ]*//' | @SED@ 's/[ ]*$//' | tr '\n' ' '"
+ vgs=`$VGS --options vg_name --noheadings | @SED@ 's/^[ ]*//' | @SED@ 's/[ ]*$//' | tr '\n' ' '`
+ debug "Let's try to backup LVM metadata for detected volume groups: $vgs"
+ debug "$VGCFGBACKUP --file \"${lvmdir}\"/\'%s\' $vgs"
+ output=`$VGCFGBACKUP --file "${lvmdir}"/'%s' $vgs`
+ exit_code=$?
+ debug $output
+ case $exit_code in
+ 0)
+ info "LVM metadata was saved to $lvmdir for volume groups: $vgs"
+ return 0
+ ;;
+ *)
+ echo "LVM metadata could not be saved for at least one of these volume groups: $vgs"
+ return 1
+ ;;
+ esac
+}
+
+if [ "$lvm" == "yes" ]; then
+ output=`doLvmBackup "${parentdir}/lvm"`
+ exit_code=$?
+ case $exit_code in
+ 0) # success. info message has already been displayed
+ true
+ ;;
+ 1) # error
+ fatal "$output"
+ ;;
+ 2) # could not even try
+ fatal "LVM metadata backup was not tried: $output"
+ ;;
+ *) # should never happen
+ fatal "Unhandled error ($exit_code) while trying to backup LVM metadata, please report a bug"
+ ;;
+ esac
+fi
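
The luksheader.*.bin files and the LVM metadata written above are only useful if they can be played back; the commands below are illustrative restore sketches with hypothetical device and volume-group names, not part of the handler:

   # LUKS: write a saved header back to the partition it came from
   # (double-check the of= device, exactly as the header comment warns)
   dd if=/var/backups/luksheader.sda2.bin of=/dev/sda2
   # LVM: metadata produced by vgcfgbackup is normally restored with vgcfgrestore
   vgcfgrestore --file /var/backups/lvm/vg00 vg00
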
diff --git a/handlers/tar.helper.in b/handlers/tar.helper.in
index cc9a89b..cdbe03a 100644
--- a/handlers/tar.helper.in
+++ b/handlers/tar.helper.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
HELPERS="$HELPERS tar:tar_backup"
@@ -14,7 +15,7 @@ tar_wizard() {
inputBox "$tar_title" "\"Name\" of backups" "$backupname"
[ $? = 1 ] && return
tar_backupname="backupname = $REPLY"
- backupname="$REPLY"
+ backupname="$REPLY"
inputBox "$tar_title" "Directory where to store the backups" "/net/backups/$backupname"
[ $? = 1 ] && return
@@ -24,8 +25,8 @@ tar_wizard() {
"none" "do not filter trough" off \
"compress" "filter trough compress" off \
"gzip" "filter trough gzip" off \
- "bzip" "filter trough bzip" on
- [ $? = 1 ] && return;
+      "bzip" "filter through bzip" on
+ [ $? = 1 ] && return;
result="$REPLY"
tar_compress="compress = $REPLY "
@@ -47,14 +48,14 @@ tar_wizard() {
[ $? = 0 ] || return 1
tar_includes="includes = "
for i in $REPLY; do
- [ -n "$i" ] && tar_includes="$tar_includes $i"
+ [ -n "$i" ] && tar_includes="$tar_includes $i"
done
done
REPLY=
while [ -z "$REPLY" ]; do
formBegin "$tar_title: Excludes"
- formItem "Exclude:" /tmp
+ formItem "Exclude:" /tmp
formItem "Exclude:" /proc
formItem "Exclude:" /sys
formItem "Exclude:" /dev
@@ -63,17 +64,17 @@ tar_wizard() {
formItem "Exclude:" /misc
formItem "Exclude:" /net
formItem "Exclude:" /selinux
- formItem "Exclude:"
+ formItem "Exclude:"
formItem "Exclude:"
formDisplay
[ $? = 0 ] || return 1
tar_excludes="excludes = "
for i in $REPLY; do
- [ -n "$i" ] && tar_excludes="$tar_excludes $i"
+ [ -n "$i" ] && tar_excludes="$tar_excludes $i"
done
done
-# Save the config
+# Save the config
get_next_filename $configdirectory/10.tar
cat > $next_filename <<EOF
$tar_when_run
@@ -85,8 +86,8 @@ $tar_excludes
# tar binary - have to be GNU tar
#TAR=/bin/tar
-#DATE /bin/date
-#DATEFORMAT "%Y.%m.%d-%H%M"
+#DATE /bin/date
+#DATEFORMAT "%Y.%m.%d-%H%M"
EOF
chmod 600 $next_filename
diff --git a/handlers/tar.in b/handlers/tar.in
index 7f0d147..b4f8c58 100644
--- a/handlers/tar.in
+++ b/handlers/tar.in
@@ -1,17 +1,18 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# tar handler script for backupninja
-getconf backupname `hostname --fqdn`
-getconf backupdir /var/backups/`hostname --fqdn`
-getconf compress bzip
-getconf includes "/etc /home /usr/local"
-getconf excludes "/tmp /proc /dev /sys /net /misc /media /srv /selinux"
+getconf backupname `hostname --fqdn`
+getconf backupdir /var/backups/`hostname --fqdn`
+getconf compress bzip
+getconf includes "/etc /home /usr/local"
+getconf excludes "/tmp /proc /dev /sys /net /misc /media /srv /selinux"
-getconf TAR `which tar`
-getconf EXTENSION tar
-getconf DATE `which date`
-getconf DATEFORMAT "%Y.%m.%d-%H%M"
+getconf TAR `which tar`
+getconf EXTENSION tar
+getconf DATE `which date`
+getconf DATEFORMAT "%Y.%m.%d-%H%M"
# See if vservers are configured
if [ "$vservers" = "yes" ]
@@ -65,7 +66,7 @@ done
debug "Running backup: " $TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
- $includes
+ $includes
$TAR -c -p -v $compress_option $exclude_options \
-f "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`".$EXTENSION" \
@@ -73,7 +74,7 @@ $TAR -c -p -v $compress_option $exclude_options \
> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.list \
2> "$backupdir/$backupname-"`$DATE "+$DATEFORMAT"`.err
-[ $? -ne 0 ] && fatal "Tar backup failed"
+[ $? -ne 0 ] && fatal "Tar backup failed"
diff --git a/handlers/trac.in b/handlers/trac.in
index a4b7bdf..018bffd 100644
--- a/handlers/trac.in
+++ b/handlers/trac.in
@@ -1,4 +1,5 @@
# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# this handler will back up trac environments (based on the svn handler)
#
@@ -12,39 +13,38 @@ getconf tmp /var/backups/trac.tmp
cd $src
for repo in `find . -name VERSION`
do
-
- repo=`dirname $repo`
- if [ "$repo" == "." ]
- then
- repo=""
- fi
-
- # Just make the parent directory for $tmp/$repo
- parentdir=`dirname $tmp/$repo`
- ret=`mkdir -p $parentdir 2>&1`
- code=$?
- if [ "$ret" ]; then
- debug "$ret"
- fi
- if [ $code != 0 ]; then
- error "command failed mkdir -p $parentdir"
- fi
-
- ret=`trac-admin $src/$repo hotcopy $tmp/$repo 2>&1`
- code=$?
- if [ "$ret" ]; then
- debug "$ret"
- fi
- if [ $code != 0 ]; then
- error "command failed -- trac-admin $src/$repo hotcopy $tmp/$repo"
- fi
+ repo=`dirname $repo`
+ if [ "$repo" == "." ]
+ then
+ repo=""
+ fi
+
+ # Just make the parent directory for $tmp/$repo
+ parentdir=`dirname $tmp/$repo`
+ ret=`mkdir -p $parentdir 2>&1`
+ code=$?
+ if [ "$ret" ]; then
+ debug "$ret"
+ fi
+ if [ $code != 0 ]; then
+ error "command failed mkdir -p $parentdir"
+ fi
+
+ ret=`trac-admin $src/$repo hotcopy $tmp/$repo 2>&1`
+ code=$?
+ if [ "$ret" ]; then
+ debug "$ret"
+ fi
+ if [ $code != 0 ]; then
+ error "command failed -- trac-admin $src/$repo hotcopy $tmp/$repo"
+ fi
done
if [ -d $dest -a -d $tmp ]; then
- rm -rf $dest
+ rm -rf $dest
fi
if [ -d $tmp ]; then
- mv $tmp $dest
+ mv $tmp $dest
fi
exit 0
diff --git a/handlers/wget b/handlers/wget
index 51054ea..ebb391e 100644
--- a/handlers/wget
+++ b/handlers/wget
@@ -1,3 +1,5 @@
+# -*- mode: sh; sh-basic-offset: 3; indent-tabs-mode: nil; -*-
+# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# backupninja handler to do incremental backups using
# wget and hardlinks, based on rsync handler
@@ -77,84 +79,82 @@ getconf folder
function rotate {
- if [[ "$2" < 4 ]]; then
- error "Rotate: minimum of 4 rotations"
- exit 1
- fi
+   if [[ "$2" -lt 4 ]]; then
+ error "Rotate: minimum of 4 rotations"
+ exit 1
+ fi
- if [ -d $1.$2 ]; then
- $nice $mv /$1.$2 /$1.tmp
- fi
+ if [ -d $1.$2 ]; then
+ $nice $mv /$1.$2 /$1.tmp
+ fi
- for ((n=`echo "$2 - 1" | bc`; n >= 0; n--)); do
- if [ -d $1.$n ]; then
- dest=`echo "$n + 1" | bc`
- $nice $mv /$1.$n /$1.$dest
- $touch /$1.$dest
- fi
- done
+ for ((n=`echo "$2 - 1" | bc`; n >= 0; n--)); do
+ if [ -d $1.$n ]; then
+ dest=`echo "$n + 1" | bc`
+ $nice $mv /$1.$n /$1.$dest
+ $touch /$1.$dest
+ fi
+ done
- if [ -d $1.tmp ]; then
- $nice $mv /$1.tmp /$1.0
- fi
+ if [ -d $1.tmp ]; then
+ $nice $mv /$1.tmp /$1.0
+ fi
- if [ -d $1.1 ]; then
- $nice $cp -alf /$1.1/. /$1.0
- fi
+ if [ -d $1.1 ]; then
+ $nice $cp -alf /$1.1/. /$1.0
+ fi
}
function move_files {
-
- ref=$tmp/makesnapshot-mymv-$$;
- $touch -r $1 $ref;
- $mv $1 $2;
- $touch -r $ref $2;
- $rm $ref;
-
+ ref=$tmp/makesnapshot-mymv-$$;
+ $touch -r $1 $ref;
+ $mv $1 $2;
+ $touch -r $ref $2;
+ $rm $ref;
}
backupdir="$mountpoint/$backupdir"
# does $backupdir exists?
-if [ ! -d "$backupdir" ]; then
- error "Backupdir $backupdir does not exist"
- exit 1
+if [ ! -d "$backupdir" ]; then
+ error "Backupdir $backupdir does not exist"
+ exit 1
fi
# setup number of increments
if [ -z "$days" ]; then
- keep="4"
+ keep="4"
else
- keep="`echo $days - 1 | bc -l`"
+ keep="`echo $days - 1 | bc -l`"
fi
# lockfile setup
if [ ! -z "$lockfile" ]; then
- $touch $lockfile || warning "Could not create lockfile $lockfile"
+ $touch $lockfile || warning "Could not create lockfile $lockfile"
fi
# nicelevel setup
-if [ ! -z "$nicelevel" ]; then
- nice="nice -n $nicelevel"
-else
- nice=""
+if [ ! -z "$nicelevel" ]; then
+ nice="nice -n $nicelevel"
+else
+ nice=""
fi
# set mv procedure
if [ $enable_mv_timestamp_bug == "yes" ]; then
- mv=move_files
+ mv=move_files
fi
# set excludes
for path in $exclude; do
- EXCLUDES="$EXCLUDES --exclude=$path"
+ EXCLUDES="$EXCLUDES --exclude=$path"
done
echo "Starting backup at `date`" >> $log
@@ -162,19 +162,19 @@ echo "Starting backup at `date`" >> $log
# mount backup destination folder as read-write
if [ "$read_only" == "1" ] || [ "$read_only" == "yes" ]; then
- if [ -d "$mountpoint" ]; then
- mount -o remount,rw $mountpoint
- if (($?)); then
- error "Could not mount $mountpoint"
- exit 1
- fi
- fi
+ if [ -d "$mountpoint" ]; then
+ mount -o remount,rw $mountpoint
+ if (($?)); then
+ error "Could not mount $mountpoint"
+ exit 1
+ fi
+ fi
fi
# the backup procedure
if [ ! -d "$backupdir/$folder/$folder.0" ]; then
- mkdir -p $backupdir/$folder/$folder.0
+ mkdir -p $backupdir/$folder/$folder.0
fi
info "Rotating $backupdir/$folder/$folder..."
@@ -183,7 +183,7 @@ rotate $backupdir/$folder/$folder $keep
info "Wget'ing $SECTION on $backupdir/$folder/$folder.0..."
if [ ! -z "$badnwidth" ]; then
- limit_rate="--limit-rate=$badnwidth""k"
+ limit_rate="--limit-rate=$badnwidth""k"
fi
cd $backupdir/$folder/$folder.0
@@ -195,25 +195,25 @@ $touch $backupdir/$folder/$folder.0
# remount backup destination as read-only
if [ "$read_only" == "1" ] || [ "$read_only" == "yes" ]; then
- mount -o remount,ro $mountpoint
+ mount -o remount,ro $mountpoint
fi
# check partition for errors
if [ "$fscheck" == "1" ] || [ "$fscheck" == "yes" ]; then
- umount $mountpoint
- if (($?)); then
- warning "Could not umount $mountpoint to run fsck"
- else
- $nice $fsck -v -y $partition >> $log
- mount $mountpoint
- fi
+ umount $mountpoint
+ if (($?)); then
+ warning "Could not umount $mountpoint to run fsck"
+ else
+ $nice $fsck -v -y $partition >> $log
+ mount $mountpoint
+ fi
fi
# removes the lockfile
if [ ! -z "$lockfile" ]; then
- $rm $lockfile || warning "Could not remove lockfile $lockfile"
+ $rm $lockfile || warning "Could not remove lockfile $lockfile"
fi
echo "Finnishing backup at `date`" >> $log