author     intrigeri <intrigeri@boum.org>  2011-05-15 19:29:28 +0200
committer  intrigeri <intrigeri@boum.org>  2011-05-15 19:29:28 +0200
commit     7757ab245ae9397136b4cdff4f98e4a7cca7dc21 (patch)
tree       96716645748d2bd6734df5483002047f21b69118 /handlers
parent     bacaea7ad6a275db9b633c25afbc19f559b97c65 (diff)
parent     69c0ec09c5e5eb9e166bc0f5c9a1ef702d0150d6 (diff)
Merge commit 'backupninja-0.9.9' into debian
Diffstat (limited to 'handlers')
-rw-r--r--  handlers/Makefile.am      |  10
-rw-r--r--  handlers/dup.helper.in    |  22
-rw-r--r--  handlers/dup.in           | 114
-rw-r--r--  handlers/ldap.in          |   2
-rw-r--r--  handlers/mysql.in         |   6
-rw-r--r--  handlers/pgsql.helper.in  |  30
-rw-r--r--  handlers/pgsql.in         | 108
-rw-r--r--  handlers/rdiff.helper.in  |   2
-rw-r--r--  handlers/rdiff.in         |  12
-rw-r--r--  handlers/rsync.in         |  52
-rw-r--r--  handlers/svn.in           |   4
-rwxr-xr-x  handlers/sys.in           |   6
-rw-r--r--  handlers/tar.helper.in    |   2
-rw-r--r--  handlers/wget             |   6
14 files changed, 250 insertions(+), 126 deletions(-)
diff --git a/handlers/Makefile.am b/handlers/Makefile.am
index eaf4b9e..c9ad2ba 100644
--- a/handlers/Makefile.am
+++ b/handlers/Makefile.am
@@ -1,7 +1,7 @@
HANDLERS = dup dup.helper ldap ldap.helper maildir makecd \
makecd.helper mysql mysql.helper pgsql pgsql.helper rdiff \
- rdiff.helper rsync sh svn sys sys.helper trac
+ rdiff.helper rsync sh svn sys sys.helper trac tar tar.helper
CLEANFILES = $(HANDLERS)
@@ -90,3 +90,11 @@ trac: $(srcdir)/trac.in
rm -f trac
$(edit) $(srcdir)/trac.in > trac
+tar: $(srcdir)/tar.in
+ rm -f tar
+ $(edit) $(srcdir)/tar.in > tar
+
+tar.helper: $(srcdir)/tar.helper.in
+ rm -f tar.helper
+ $(edit) $(srcdir)/tar.helper.in > tar.helper
+
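Editorial note on the hunk above: the new tar and tar.helper targets follow the file's existing pattern, in which each handler script is generated from its .in template through the $(edit) macro. That macro is defined elsewhere in Makefile.am (outside this diff); in automake projects it is conventionally a sed pipeline along these lines (an assumption about its shape, not the project's actual definition):

    # hypothetical $(edit) definition -- the real one lives outside this diff
    edit = sed \
            -e 's|@AWK[@]|$(AWK)|g' \
            -e 's|@SED[@]|$(SED)|g' \
            -e 's|@BASH[@]|$(BASH)|g'

The [@] bracket trick keeps config.status from substituting the placeholder inside the Makefile itself.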
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index e985c5e..7f82c2f 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -124,6 +124,7 @@ do_dup_dest() {
formItem "keep" "$dup_keep"
formItem "incremental" "$dup_incremental"
formItem "increments" "$dup_increments"
+ formItem "keepincroffulls" "$dup_keepincroffulls"
formItem "bandwidthlimit" "$dup_bandwidth"
formItem "sshoptions" "$dup_sshoptions"
formDisplay
@@ -141,8 +142,9 @@ do_dup_dest() {
dup_keep=${thereply[3]}
dup_incremental=${thereply[4]}
dup_increments=${thereply[5]}
- dup_bandwidth=${thereply[6]}
- dup_sshoptions=${thereply[7]}
+ dup_keepincroffulls=${thereply[6]}
+ dup_bandwidth=${thereply[7]}
+ dup_sshoptions=${thereply[8]}
done
set +o noglob
@@ -179,7 +181,7 @@ do_dup_gpg_signkey() {
dup_gpg_onekeypair=no
fi
- if [ "$dup_gpg_onekeypair" == "no" }; then
+ if [ "$dup_gpg_onekeypair" == "no" ]; then
# signkey ?
REPLY=
while [ -z "$REPLY" -o -z "$dup_gpg_signkey" ]; do
@@ -421,9 +423,18 @@ increments = $dup_increments
# (you can also use the time format of duplicity)
# 'keep = yes' means : do not delete old data, the remote host will take care of this
#keep = 60
+#keep = 1Y
#keep = yes
keep = $dup_keep
+# for how many recent full backups to keep their increments ;
+# default is all (keep all increments).
+# increments of older full backups will be deleted : only the most
+# recent ones (count provided) will be kept
+#keepincroffulls = all
+#keepincroffulls = 6
+keepincroffulls = $dup_keepincroffulls
+
# full destination URL, in duplicity format; if set, desturl overrides
# sshoptions, destdir, desthost and destuser; it also disables testconnect and
# bandwithlimit. For details, see duplicity manpage, section "URL FORMAT".
@@ -442,14 +453,14 @@ keep = $dup_keep
# bandwidth limit, in kbit/s ; default is 0, i.e. no limit
# if using 'desturl' above, 'bandwidthlimit' must not be set
-# an example setting of 128 kbps would be:
+# an example setting of 128 Kbit/s would be:
#bandwidthlimit = 128
bandwidthlimit = $dup_bandwidth
# passed directly to ssh, scp (and sftp in duplicity >=0.4.2)
# warning: sftp does not support all scp options, especially -i; as
# a workaround, you can use "-o <SSHOPTION>"
-#sshoptions = -o IdentityFile=/root/.ssh/id_dsa_duplicity
+#sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
sshoptions = $dup_sshoptions
# put the backups under this destination directory
@@ -534,6 +545,7 @@ dup_wizard() {
dup_incremental=yes
dup_increments=30
dup_keep=60
+ dup_keepincroffulls=
dup_bandwidth=
dup_sshoptions=
dup_destdir="/backups/`hostname`"
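For orientation (editorial illustration, values are hypothetical): with the new setting, a dup profile containing

    keep = 60
    keepincroffulls = 6

retains increments only for the 6 most recent full backups, which the handler enforces by eventually invoking

    duplicity remove-all-inc-of-but-n-full 6 --force <options> <desturl>

as shown in the dup.in hunks below.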
diff --git a/handlers/dup.in b/handlers/dup.in
index 5216643..b58d34d 100644
--- a/handlers/dup.in
+++ b/handlers/dup.in
@@ -2,7 +2,7 @@
# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# duplicity script for backupninja
-# requires duplicity
+# requires duplicity >= 0.4.4, and >= 0.4.9 when using a custom tmpdir.
#
getconf options
@@ -26,6 +26,7 @@ setsection dest
getconf incremental yes
getconf increments 30
getconf keep 60
+getconf keepincroffulls all
getconf desturl
getconf awsaccesskeyid
getconf awssecretaccesskey
@@ -51,7 +52,7 @@ fi
### VServers
# If vservers are configured, check that the ones listed in $vsnames do exist.
-local usevserver=no
+usevserver=no
if [ $vservers_are_available = yes ]; then
if [ "$vsnames" = all ]; then
vsnames="$found_vservers"
@@ -106,35 +107,14 @@ duplicity_major="`echo $duplicity_version | @AWK@ -F '.' '{print $1}'`"
duplicity_minor="`echo $duplicity_version | @AWK@ -F '.' '{print $2}'`"
duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`"
-### ssh/scp/sftp options
-# 1. duplicity >= 0.4.2 needs --sftp-command
-# (NB: sftp does not support the -l option)
-# 2. duplicity 0.4.3 to 0.4.9 replace --ssh-command with --ssh-options, which is
-# passed to scp and sftp commands by duplicity. We don't use it: since this
-# version does not use the ssh command anymore, we keep compatibility with
-# our previous config files by passing $sshoptions to --scp-command and
-# --sftp-command ourselves
-
+### ssh/scp/sftp options (duplicity < 0.4.3 is unsupported)
scpoptions="$sshoptions"
if [ "$bandwidthlimit" != 0 ]; then
[ -z "$desturl" ] || warning 'The bandwidthlimit option is not used when desturl is set.'
scpoptions="$scpoptions -l $bandwidthlimit"
fi
-
-# < 0.4.2 : only uses ssh and scp
-if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 2 ]; then
- execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --ssh-command 'ssh $sshoptions'"
-# >= 0.4.2 : also uses sftp, --sftp-command option is now supported
-else
- sftpoptions="$sshoptions"
- # == 0.4.2 : uses ssh, scp and sftp
- if [ "$duplicity_major" -eq 0 -a "$duplicity_minor" -eq 4 -a "$duplicity_sub" -eq 2 ]; then
- execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions' --ssh-command 'ssh $sshoptions'"
- # >= 0.4.3 : uses only scp and sftp, --ssh-command option is not supported anymore
- else
- execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions'"
- fi
-fi
+sftpoptions="$sshoptions"
+execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions'"
### Symmetric or asymmetric (public/private key pair) encryption
if [ -n "$encryptkey" ]; then
@@ -162,12 +142,7 @@ fi
# full backup.
# If incremental==no, force a full backup anyway.
if [ "$incremental" == "no" ]; then
- # before 0.4.4, full was an option and not a command
- if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 4 ]; then
- execstr_options="${execstr_options} --full"
- else
- execstr_command="full"
- fi
+ execstr_command="full"
else
# we're in incremental mode
if [ "$increments" != "keep" ]; then
@@ -185,7 +160,6 @@ if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -g
fi
### Temporary directory
-precmd=
if [ -n "$tmpdir" ]; then
if [ ! -d "$tmpdir" ]; then
info "Temporary directory ($tmpdir) does not exist, creating it."
@@ -194,7 +168,7 @@ if [ -n "$tmpdir" ]; then
chmod 0700 "$tmpdir"
fi
info "Using $tmpdir as TMPDIR"
- precmd="${precmd}TMPDIR=$tmpdir "
+ execstr_options="${execstr_options} --tempdir '$tmpdir'"
fi
### Archive directory
@@ -211,10 +185,6 @@ if [ "$keep" != "yes" ]; then
if [ "`echo $keep | tr -d 0-9`" == "" ]; then
keep="${keep}D"
fi
- # before 0.4.4, remove-older-than was an option and not a command
- if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 4 ]; then
- execstr_options="${execstr_options} --remove-older-than $keep"
- fi
fi
### Source
@@ -269,56 +239,76 @@ fi
### Cleanup commands (duplicity >= 0.4.4)
# cleanup
-if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -ge 4 ]; then
- debug "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart"
+debug "duplicity cleanup --force $execstr_options $execstr_serverpart"
+if [ ! $test ]; then
+ export PASSPHRASE=$password
+ export FTP_PASSWORD=$ftp_password
+ output=`nice -n $nicelevel \
+ su -c \
+ "duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"`
+ exit_code=$?
+ if [ $exit_code -eq 0 ]; then
+ debug $output
+ info "Duplicity cleanup finished successfully."
+ else
+ debug $output
+ warning "Duplicity cleanup failed."
+ fi
+fi
+
+# remove-older-than
+if [ "$keep" != "yes" ]; then
+ debug "duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
- su -c \
- "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"`
+ su -c \
+ "duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"`
exit_code=$?
if [ $exit_code -eq 0 ]; then
debug $output
- info "Duplicity cleanup finished successfully."
+ info "Duplicity remove-older-than finished successfully."
else
debug $output
- warning "Duplicity cleanup failed."
+ warning "Duplicity remove-older-than failed."
fi
fi
fi
-# remove-older-than
+# remove-all-inc-of-but-n-full : remove increments of older full backups ; keep only the latest ones
if [ "$keep" != "yes" ]; then
- if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -ge 4 ]; then
- debug "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
- if [ ! $test ]; then
- export PASSPHRASE=$password
- export FTP_PASSWORD=$ftp_password
- output=`nice -n $nicelevel \
- su -c \
- "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"`
- exit_code=$?
- if [ $exit_code -eq 0 ]; then
- debug $output
- info "Duplicity remove-older-than finished successfully."
- else
- debug $output
- warning "Duplicity remove-older-than failed."
+ if [ "$keepincroffulls" != "all" ]; then
+ if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 9 -a "$duplicity_sub" -ge 10 ]; then
+ debug "$precmd duplicity remove-all-inc-of-but-n-full $keepincroffulls --force $execstr_options $execstr_serverpart"
+ if [ ! $test ]; then
+ export PASSPHRASE=$password
+ export FTP_PASSWORD=$ftp_password
+ output=`nice -n $nicelevel \
+ su -c \
+ "$precmd duplicity remove-all-inc-of-but-n-full $keepincroffulls --force $execstr_options $execstr_serverpart 2>&1"`
+ exit_code=$?
+ if [ $exit_code -eq 0 ]; then
+ debug $output
+ info "Duplicity remove-all-inc-of-but-n-full finished successfully."
+ else
+ debug $output
+ warning "Duplicity remove-all-inc-of-but-n-full failed."
+ fi
fi
fi
fi
fi
### Backup command
-debug "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart"
+debug "duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart"
if [ ! $test ]; then
outputfile=`maketemp backupout`
export PASSPHRASE=$password
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
- "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`
+ "duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`
exit_code=$?
debug $output
cat $outputfile | (while read output ; do
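Two editorial observations on the dup.in hunks above, neither of them extraction damage: first, the new remove-all-inc-of-but-n-full block still interpolates $precmd even though this very patch deletes the precmd= initialization, so the variable is now always empty; second, chained tests of the form [ major -ge 0 -a minor -ge 9 -a sub -ge 10 ] do not order dotted versions correctly (0.10.2 would fail a check meant as ">= 0.9.10"). A component-wise comparison avoids the second pitfall; a sketch, assuming plain numeric components:

    # return 0 if dotted version $1 >= $2 (editorial sketch)
    version_ge() {
       local IFS=. i
       local -a a b
       read -ra a <<< "$1"
       read -ra b <<< "$2"
       for (( i = 0; i < ${#a[@]} || i < ${#b[@]}; i++ )); do
          (( ${a[i]:-0} > ${b[i]:-0} )) && return 0
          (( ${a[i]:-0} < ${b[i]:-0} )) && return 1
       done
       return 0   # versions are equal
    }

    # usage; version values are illustrative:
    duplicity_version="0.10.2"
    if version_ge "$duplicity_version" "0.9.10"; then
       echo "remove-all-inc-of-but-n-full is available"
    fi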
diff --git a/handlers/ldap.in b/handlers/ldap.in
index 83307ee..600f172 100644
--- a/handlers/ldap.in
+++ b/handlers/ldap.in
@@ -91,7 +91,7 @@ if [ "$ldif" == "yes" ]; then
execstr="$execstr > $dumpdir/$dbsuffix.ldif"
fi
debug "$execstr"
- output=`su root -c "set -o pipefail ; $execstr" 2>&1`
+ output=`su root -s /bin/bash -c "set -o pipefail ; $execstr" 2>&1`
code=$?
if [ "$code" == "0" ]; then
debug $output
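The -s /bin/bash added here (and in the mysql and pgsql hunks below) matters because set -o pipefail is a bashism: su -c runs the command under the target user's login shell, so the handler would break wherever root's shell is a plain POSIX sh. A quick illustration (editorial, not from the patch):

    su root -c 'set -o pipefail; true'               # fails if root's shell is dash
    su root -s /bin/bash -c 'set -o pipefail; true'  # always interpreted by bash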
diff --git a/handlers/mysql.in b/handlers/mysql.in
index 0282046..185a98a 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -24,8 +24,8 @@ getconf configfile /etc/mysql/debian.cnf
# Decide if the handler should operate on a vserver or on the host.
# In the former case, check that $vsname exists and is running.
-local usevserver=no
-local vroot
+usevserver=no
+vroot=''
if [ $vservers_are_available = yes ]; then
if [ -n "$vsname" ]; then
# does it exist ?
@@ -303,7 +303,7 @@ then
debug "su $user -c \"$execstr\""
if [ ! $test ]
then
- output=`su $user -c "set -o pipefail ; $execstr" 2>&1`
+ output=`su $user -s /bin/bash -c "set -o pipefail ; $execstr" 2>&1`
code=$?
if [ "$code" == "0" ]
then
diff --git a/handlers/pgsql.helper.in b/handlers/pgsql.helper.in
index ff1cfd4..82e6b48 100644
--- a/handlers/pgsql.helper.in
+++ b/handlers/pgsql.helper.in
@@ -67,6 +67,21 @@ pgsql_wizard() {
pgsql_compress="compress = no"
fi
+ # pg_dump format, defaults to plain, custom is recommended by PostgreSQL
+ menuBox "$pgsql_title" "Choose a pg_dump format:" \
+ plain "Default plain-text sql script, use with psql." \
+ tar "More flexible than the plain, use with pg_restore." \
+ custom "The most flexible format, use with pg_restore."
+ if [ $? = 0 ]; then
+ result="$REPLY"
+ case "$result" in
+ "tar") pgsql_format="format = tar";;
+ "custom") pgsql_format="format = custom";;
+ *) pgsql_format = "format = plain";;
+ esac
+ fi
+
+
# write config file
get_next_filename $configdirectory/20.pgsql
cat >> $next_filename <<EOF
@@ -97,7 +112,22 @@ $pgsql_databases
# if yes, compress the pg_dump/pg_dumpall output.
$pgsql_compress
+# format = < plain | tar | custom > (default = plain)
+# plain - Output a plain-text SQL script file with the extension .sql.
+# When dumping all databases, a single file is created via pg_dumpall.
+# tar - Output a tar archive suitable for input into pg_restore. More
+# flexible than plain and can be manipulated by standard Unix tools
+# such as tar. Creates a globals.sql file and an archive per database.
+# custom - Output a custom PostgreSQL pg_restore archive. This is the most
+# flexible format allowing selective import and reordering of database
+# objects at the time the database is restored via pg_restore. This
+# option creates a globals.sql file containing the cluster role and
+# other information dumped by pg_dumpall -g and a pg_restore file
+# per selected database. See the pg_dump and pg_restore man pages.
+$pgsql_format
+
### You can also set the following variables in backupninja.conf:
+# PSQL: psql path (default: /usr/bin/psql)
# PGSQLDUMP: pg_dump path (default: /usr/bin/pg_dump)
# PGSQLDUMPALL: pg_dumpall path (default: /usr/bin/pg_dumpall)
# PGSQLUSER: user running PostgreSQL (default: postgres)
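One editorial catch in the wizard hunk above (present in the patch itself, not extraction damage): the fallback case arm writes pgsql_format = "format = plain" with spaces around the equals sign, which the shell parses as running a command named pgsql_format rather than assigning the variable. The presumably intended form:

    case "$result" in
       "tar")    pgsql_format="format = tar";;
       "custom") pgsql_format="format = custom";;
       *)        pgsql_format="format = plain";;   # no spaces around '='
    esac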
diff --git a/handlers/pgsql.in b/handlers/pgsql.in
index 0b7badf..a50d3c7 100644
--- a/handlers/pgsql.in
+++ b/handlers/pgsql.in
@@ -8,13 +8,15 @@ getconf backupdir /var/backups/postgres
getconf databases all
getconf compress yes
getconf vsname
+# format maps to pg_dump --format= option, old/default was plain
+getconf format plain
localhost=`hostname`
# Decide if the handler should operate on a vserver or on the host.
# In the former case, check that $vsname exists and is running.
-local usevserver=no
-local vroot
+usevserver=no
+vroot=''
if [ $vservers_are_available = yes ]; then
if [ -n "$vsname" ]; then
# does it exist ?
@@ -35,17 +37,31 @@ fi
# Make sure that the system to backup has the needed executables
if [ $usevserver = yes ]; then
debug "Examining vserver '$vsname'."
- if [ "$databases" == "all" ]; then
+ if [ "$databases" == "all" ] && [ "$format" = "plain" ]; then
[ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \
fatal "Can't find $PGSQLDUMPALL in vserver $vsname."
+ elif [ "$format" != "plain" ]; then
+ [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \
+ fatal "Can't find $PGSQLDUMPALL in vserver $vsname."
+ [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \
+ fatal "Can't find $PGSQLDUMP in vserver $vsname."
+ [ -x "$vroot`$VSERVER $vsname exec which $PSQL`" ] || \
+ fatal "Can't find $PSQL in vserver $vsname."
else
[ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \
fatal "Can't find $PGSQLDUMP in vserver $vsname."
fi
else
- if [ "$databases" == "all" ]; then
+ if [ "$databases" == "all" ] && [ "$format" = "plain" ]; then
+ [ -x "`which $PGSQLDUMPALL`" ] || \
+ fatal "Can't find $PGSQLDUMPALL."
+ elif [ "$format" != "plain" ]; then
[ -x "`which $PGSQLDUMPALL`" ] || \
fatal "Can't find $PGSQLDUMPALL."
+ [ -x "`which $PGSQLDUMP`" ] || \
+ fatal "Can't find $PGSQLDUMP."
+ [ -x "`which $PSQL`" ] || \
+ fatal "Can't find $PSQL."
else
[ -x "`which $PGSQLDUMP`" ] || \
fatal "Can't find $PGSQLDUMP."
@@ -71,17 +87,41 @@ chown $pguid $vroot$backupdir
debug "chmod 700 $vroot$backupdir"
chmod 700 $vroot$backupdir
+
+# If we are using the custom (best) or tar pg_dump format, and
+# dumping "all" databases, we will substitute "all" for a list
+# of all non-template databases to avoid the use of pg_dumpall.
+dumpglobals="no"
+if [ "$databases" = "all" ] && [ "$format" != "plain" ]; then
+ cmdprefix=""
+ if [ "$usevserver" = "yes" ]; then
+ cmdprefix="$VSERVER $vsname exec "
+ fi
+ execstr="${cmdprefix} su - $PGSQLUSER -c 'psql -AtU $PGSQLUSER -c \"SELECT datname FROM pg_database WHERE NOT datistemplate\"'"
+ debug execstr
+ dblist=""
+ for db in $(eval $execstr 2>&1); do
+ dblist="$dblist $db"
+ done
+ if [ "$dblist" != "" ]; then
+ databases="$dblist"
+ fi
+ # Dump globals (pg_dumpall -g) for roles and tablespaces
+ dumpglobals="yes"
+fi
+
+
# if $databases = all, use pg_dumpall
if [ "$databases" == "all" ]; then
if [ $usevserver = yes ]; then
if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\""
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\""
else
execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${vsname}.sql'\""
fi
else
if [ "$compress" == "yes" ]; then
- execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\""
+ execstr="su - $PGSQLUSER -s /bin/bash -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\""
else
execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${localhost}-all.sql'\""
fi
@@ -101,20 +141,58 @@ if [ "$databases" == "all" ]; then
# else use pg_dump on each specified database
else
- for db in $databases; do
+ # If we're not doing plain format, database=all may now be database=list
+ # so we track the database=all selection in dumpglobals which tells us
+ # to also dump the roles and tablespaces via pg_dumpall -g
+ if [ "$dumpglobals" = "yes" ]; then
+ globalscmd=""
+ if [ "$compress" == "yes" ]; then
+ globalscmd="set -o pipefail ; $PGSQLDUMPALL -g | $GZIP $GZIP_OPTS > '$backupdir/globals.sql.gz'"
+ else
+ globalscmd="$PGSQLDUMPALL -g > '$backupdir/globals.sql'"
+ fi
if [ $usevserver = yes ]; then
- if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\""
- else
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db | > '$backupdir/${db}.sql'\""
- fi
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"$globalscmd\""
else
- if [ "$compress" == "yes" ]; then
- execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\""
+ execstr="su - $PGSQLUSER -s /bin/bash -c \"$globalscmd\""
+ fi
+ debug "$execstr"
+ if [ ! $test ]; then
+ output=`eval $execstr 2>&1`
+ code=$?
+ if [ "$code" == "0" ]; then
+ debug $output
+ info "Successfully finished pgsql globals (roles and tablespaces) dump"
else
- execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db > '$backupdir/${db}.sql'\""
+ warning $output
+ warning "Failed to dump pgsql globals (roles and tablespaces)"
fi
fi
+ fi
+ for db in $databases; do
+ dumpext="sql"
+ if [ "$format" != "plain" ]; then
+ dumpext="pg_dump"
+ fi
+ # To better support the backupninja global GZIP and rsync-friendly GZIP_OPTS
+ # the custom archive format is told to disable compression. The plain format
+ # is uncompressed by default and the tar format doesn't support pg_dump compression.
+ disablecustomcompress=""
+ if [ "$format" = "custom" ]; then
+ disablecustomcompress="--compress=0"
+ fi
+ dumpcmd=""
+ globalscmd=""
+ if [ "$compress" == "yes" ]; then
+ dumpcmd="set -o pipefail ; $PGSQLDUMP --format=$format ${disablecustomcompress} $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.${dumpext}.gz'"
+ else
+ dumpcmd="$PGSQLDUMP --format=$format ${disablecustomcompress} $db | > '$backupdir/${db}.${dumpext}'"
+ fi
+ if [ $usevserver = yes ]; then
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"$dumpcmd\""
+ else
+ execstr="su - $PGSQLUSER -s /bin/bash -c \"$dumpcmd\""
+ fi
debug "$execstr"
if [ ! $test ]; then
output=`eval $execstr 2>&1`
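Two more editorial catches in the pgsql.in hunks, again in the patch itself: debug execstr logs the literal word instead of the command (the $ is missing), and the uncompressed per-database branch keeps a stray pipe from the old code, so the dump is piped into a bare redirection and the output file ends up empty. Corrected sketches of both lines:

    debug "$execstr"
    dumpcmd="$PGSQLDUMP --format=$format ${disablecustomcompress} $db > '$backupdir/${db}.${dumpext}'"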
diff --git a/handlers/rdiff.helper.in b/handlers/rdiff.helper.in
index b5bb8bb..039799e 100644
--- a/handlers/rdiff.helper.in
+++ b/handlers/rdiff.helper.in
@@ -168,7 +168,7 @@ do_rdiff_ssh_con() {
if [ ! -f /root/.ssh/id_dsa.pub -a ! -f /root/.ssh/id_rsa.pub ]; then
echo "Creating local root's ssh key"
- ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
+ ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa -N ""
echo "Done. hit return to continue"
read
fi
diff --git a/handlers/rdiff.in b/handlers/rdiff.in
index 60386fa..c2f5aa0 100644
--- a/handlers/rdiff.in
+++ b/handlers/rdiff.in
@@ -115,7 +115,7 @@ fi
### CHECK CONFIG ###
# If vservers are configured, check that the ones listed in $vsnames do exist.
-local usevserver=no
+usevserver=no
if [ $vservers_are_available = yes ]; then
if [ "$vsnames" = all ]; then
vsnames="$found_vservers"
@@ -219,7 +219,10 @@ SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
for i in $exclude; do
str="${i//__star__/*}"
- execstr="${execstr}--exclude '$str' "
+ case "$str" in
+ @*) execstr="${execstr}--exclude-globbing-filelist '${str#@}' " ;;
+ *) execstr="${execstr}--exclude '$str' " ;;
+ esac
done
IFS=$SAVEIFS
# includes
@@ -228,7 +231,10 @@ IFS=$(echo -en "\n\b")
for i in $include; do
[ "$i" != "/" ] || fatal "Sorry, you cannot use 'include = /'"
str="${i//__star__/*}"
- execstr="${execstr}--include '$str' "
+ case "$str" in
+ @*) execstr="${execstr}--include-globbing-filelist '${str#@}' " ;;
+ *) execstr="${execstr}--include '$str' " ;;
+ esac
done
IFS=$SAVEIFS
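For context, the new @ prefix lets a config entry hand rdiff-backup a globbing filelist instead of a single pattern. A hypothetical [source] section using it (editorial illustration; the file paths are made up):

    [source]
    include = /home
    include = @/etc/backupninja/rdiff-include.list
    exclude = @/etc/backupninja/rdiff-exclude.list
    exclude = /home/*/.cache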
diff --git a/handlers/rsync.in b/handlers/rsync.in
index fea7e7b..d93411a 100644
--- a/handlers/rsync.in
+++ b/handlers/rsync.in
@@ -60,7 +60,7 @@
# exclude_vserver = vserver-name (valid only if vservers = yes on backupninja.conf)
# numericids = when set to 1, use numeric ids instead of user/group mappings on rsync
# compress = if set to 1, compress data on rsync (remote source only)
-# bandwidthlimit = set a badnwidth limit in kbps (remote source only)
+# bandwidthlimit = set a bandwidth limit in KB/s (remote source only)
# remote_rsync = remote rsync program (remote source only)
# id_file = ssh key file (remote source only)
# batch = set to "yes" to rsync use a batch file as source
@@ -79,7 +79,7 @@
# port = remote port number (remote destination only)
# user = remote user name (remote destination only)
# id_file = ssh key file (remote destination only)
-# bandwidthlimit = set a badnwidth limit in kbps (remote destination only)
+# bandwidthlimit = set a bandwidth limit in KB/s (remote destination only)
# remote_rsync = remote rsync program (remote dest only)
# batch = set to "yes" to rsync write a batch file from the changes
# batchbase = folder where the batch file should be written
@@ -253,7 +253,7 @@ function eval_config {
if [ -z "$days" ]; then
keep="4"
else
- keep="`echo $days - 1 | bc -l`"
+ keep=$[$days - 1]
fi
fi
@@ -302,9 +302,9 @@ function rotate_short {
$nice $mv /$folder.$keep /$folder.tmp
fi
- for ((n=`echo "$keep - 1" | bc`; n >= 0; n--)); do
+ for ((n=$[$keep - 1]; n >= 0; n--)); do
if [ -d $folder.$n ]; then
- dest=`echo "$n + 1" | bc`
+ dest=$[$n + 1]
$nice $mv /$folder.$n /$folder.$dest
$touch /$folder.$dest
mkdir -p $metadata/`basename $folder`.$dest
@@ -384,14 +384,14 @@ function rotate_long {
for rottype in daily weekly monthly; do
seconds=$((seconds_${rottype}))
-
dir="$backuproot/$rottype"
- metadata="$backuproot/metadata/$rottype.1"
- mkdir -p $metadata
+ metadata="$backuproot/metadata/$rottype"
+
+ mkdir -p $metadata.1
if [ ! -d $dir.1 ]; then
echo "Debug: $dir.1 does not exist, skipping."
continue 1
- elif [ ! -f $metadata/created ] && [ ! -f $metadata/rotated ]; then
+ elif [ ! -f $metadata.1/created ] && [ ! -f $metadata.1/rotated ]; then
echo "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
@@ -401,10 +401,10 @@ function rotate_long {
[ "$oldest" == "" ] && oldest=0
for (( i=$oldest; i > 0; i-- )); do
if [ -d $dir.$i ]; then
- if [ -f $metadata/created ]; then
- created=`tail -1 $metadata/created`
- elif [ -f $metadata/rotated ]; then
- created=`tail -1 $metadata/rotated`
+ if [ -f $metadata.$i/created ]; then
+ created=`tail -1 $metadata.$i/created`
+ elif [ -f $metadata.$i/rotated ]; then
+ created=`tail -1 $metadata.$i/rotated`
else
created=0
fi
@@ -414,8 +414,8 @@ function rotate_long {
if [ ! -d $dir.$next ]; then
echo "Debug: $rottype.$i --> $rottype.$next"
$nice mv $dir.$i $dir.$next
- mkdir -p $backuproot/metadata/$rottype.$next
- date +%c%n%s > $backuproot/metadata/$rottype.$next/rotated
+ mkdir -p $metadata.$next
+ date +%c%n%s > $metadata.$next/rotated
else
echo "Debug: skipping rotation of $dir.$i because $dir.$next already exists."
fi
@@ -485,14 +485,14 @@ function rotate_long_remote {
for rottype in daily weekly monthly; do
seconds=\$((seconds_\${rottype}))
-
dir="$backuproot/\$rottype"
- metadata="$backuproot/metadata/\$rottype.1"
- mkdir -p \$metadata
+ metadata="$backuproot/metadata/\$rottype"
+
+ mkdir -p \$metadata.1
if [ ! -d \$dir.1 ]; then
echo "Debug: \$dir.1 does not exist, skipping."
continue 1
- elif [ ! -f \$metadata/created ] && [ ! -f \$metadata/rotated ]; then
+ elif [ ! -f \$metadata.1/created ] && [ ! -f \$metadata.1/rotated ]; then
echo "Warning: metadata does not exist for \$dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
@@ -502,10 +502,10 @@ function rotate_long_remote {
[ "\$oldest" == "" ] && oldest=0
for (( i=\$oldest; i > 0; i-- )); do
if [ -d \$dir.\$i ]; then
- if [ -f \$metadata/created ]; then
- created=\`tail -1 \$metadata/created\`
- elif [ -f \$metadata/rotated ]; then
- created=\`tail -1 \$metadata/rotated\`
+ if [ -f \$metadata.\$i/created ]; then
+ created=\`tail -1 \$metadata.\$i/created\`
+ elif [ -f \$metadata.\$i/rotated ]; then
+ created=\`tail -1 \$metadata.\$i/rotated\`
else
created=0
fi
@@ -515,8 +515,8 @@ function rotate_long_remote {
if [ ! -d \$dir.\$next ]; then
echo "Debug: \$rottype.\$i --> \$rottype.\$next"
$nice mv \$dir.\$i \$dir.\$next
- mkdir -p $backuproot/metadata/\$rottype.\$next
- date +%c%n%s > $backuproot/metadata/\$rottype.\$next/rotated
+ mkdir -p \$metadata.\$next
+ date +%c%n%s > \$metadata.\$next/rotated
else
echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."
fi
@@ -528,7 +528,7 @@ function rotate_long_remote {
done
max=\$((keepdaily+1))
- if [ \$keepweekly -gt 0 -a -d $backuproot/daily.\$max -a ! -d \$backuproot/weekly.1 ]; then
+ if [ \$keepweekly -gt 0 -a -d $backuproot/daily.\$max -a ! -d $backuproot/weekly.1 ]; then
echo "Debug: daily.\$max --> weekly.1"
$nice mv $backuproot/daily.\$max $backuproot/weekly.1
mkdir -p $backuproot/metadata/weekly.1
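Replacing bc with shell arithmetic drops an external dependency, though the $[ ... ] form the patch uses is long-deprecated bash syntax kept only for backwards compatibility; the portable spelling is $(( ... )). An equivalent sketch:

    days=5   # hypothetical value
    keep=$(( days - 1 ))
    for (( n = keep - 1; n >= 0; n-- )); do
       dest=$(( n + 1 ))
       echo "would rotate folder.$n to folder.$dest"
    done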
diff --git a/handlers/svn.in b/handlers/svn.in
index 5e5531a..bb70eee 100644
--- a/handlers/svn.in
+++ b/handlers/svn.in
@@ -14,8 +14,8 @@ error=0
# Decide if the handler should operate on a vserver or on the host.
# In the former case, check that $vsname exists and is running.
-local usevserver=no
-local vroot
+usevserver=no
+vroot=''
if [ $vservers_are_available = yes ]; then
if [ -n "$vsname" ]; then
# does it exist ?
diff --git a/handlers/sys.in b/handlers/sys.in
index fcf3e31..74133a3 100755
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -103,7 +103,7 @@ getconf lvm no
getconf vsnames all
# If vservers are configured, check that the ones listed in $vsnames are running.
-local usevserver=no
+usevserver=no
if [ $vservers_are_available = yes ]; then
if [ "$vsnames" = all ]; then
vsnames="$found_vservers"
@@ -350,7 +350,7 @@ STATUS="Getting kernel version:"
catifexec "/bin/uname" "-a"
STATUS="Checking module information:"
catifexec "/sbin/lsmod"
-for x in $(/sbin/lsmod | /bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
+for x in $(/sbin/lsmod | /usr/bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
) ; do
STATUS="Checking module information $x:"
catifexec "/sbin/modinfo" "$x"
@@ -542,7 +542,7 @@ fi
#
if [ "$hardware" == "yes" ]; then
- if [ "dohwinfo" == "yes" ]; then
+ if [ "$dohwinfo" == "yes" ]; then
if [ -f $hardwarefile ]; then
rm $hardwarefile
fi
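The last sys.in hunk fixes a classic quoting slip: the old test compared the literal string "dohwinfo" against "yes", which can never be true, so the hardware-info branch never ran. Illustration (editorial):

    dohwinfo=yes
    [ "dohwinfo" == "yes" ]  && echo "never printed: two unequal literals"
    [ "$dohwinfo" == "yes" ] && echo "printed: tests the variable's value"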
diff --git a/handlers/tar.helper.in b/handlers/tar.helper.in
index cdbe03a..4a483be 100644
--- a/handlers/tar.helper.in
+++ b/handlers/tar.helper.in
@@ -17,7 +17,7 @@ tar_wizard() {
tar_backupname="backupname = $REPLY"
backupname="$REPLY"
- inputBox "$tar_title" "Directory where to store the backups" "/net/backups/$backupname"
+ inputBox "$tar_title" "Directory where to store the backups" "/var/backups/tar/$backupname"
[ $? = 1 ] && return
tar_backupdir="backupdir = $REPLY"
diff --git a/handlers/wget b/handlers/wget
index ebb391e..67425fc 100644
--- a/handlers/wget
+++ b/handlers/wget
@@ -88,9 +88,9 @@ function rotate {
$nice $mv /$1.$2 /$1.tmp
fi
- for ((n=`echo "$2 - 1" | bc`; n >= 0; n--)); do
+ for ((n=$[$2 - 1]; n >= 0; n--)); do
if [ -d $1.$n ]; then
- dest=`echo "$n + 1" | bc`
+ dest=$[$n + 1]
$nice $mv /$1.$n /$1.$dest
$touch /$1.$dest
fi
@@ -128,7 +128,7 @@ fi
if [ -z "$days" ]; then
keep="4"
else
- keep="`echo $days - 1 | bc -l`"
+ keep=$[$days - 1]
fi
# lockfile setup