author     intrigeri <intrigeri@boum.org>  2011-05-15 19:29:28 +0200
committer  intrigeri <intrigeri@boum.org>  2011-05-15 19:29:28 +0200
commit     7757ab245ae9397136b4cdff4f98e4a7cca7dc21 (patch)
tree       96716645748d2bd6734df5483002047f21b69118
parent     bacaea7ad6a275db9b633c25afbc19f559b97c65 (diff)
parent     69c0ec09c5e5eb9e166bc0f5c9a1ef702d0150d6 (diff)
download   backupninja-7757ab245ae9397136b4cdff4f98e4a7cca7dc21.tar.gz
           backupninja-7757ab245ae9397136b4cdff4f98e4a7cca7dc21.tar.bz2

Merge commit 'backupninja-0.9.9' into debian
-rw-r--r--  AUTHORS                   |   9
-rw-r--r--  ChangeLog                 |  56
-rw-r--r--  README                    |   4
-rw-r--r--  configure.in              |   2
-rw-r--r--  etc/backupninja.conf.in   |   1
-rw-r--r--  examples/example.dup      |  14
-rw-r--r--  examples/example.pgsql    |  13
-rw-r--r--  examples/example.rdiff    |   7
-rw-r--r--  examples/example.rsync    |   2
-rw-r--r--  handlers/Makefile.am      |  10
-rw-r--r--  handlers/dup.helper.in    |  22
-rw-r--r--  handlers/dup.in           | 114
-rw-r--r--  handlers/ldap.in          |   2
-rw-r--r--  handlers/mysql.in         |   6
-rw-r--r--  handlers/pgsql.helper.in  |  30
-rw-r--r--  handlers/pgsql.in         | 108
-rw-r--r--  handlers/rdiff.helper.in  |   2
-rw-r--r--  handlers/rdiff.in         |  12
-rw-r--r--  handlers/rsync.in         |  52
-rw-r--r--  handlers/svn.in           |   4
-rwxr-xr-x  handlers/sys.in           |   6
-rw-r--r--  handlers/tar.helper.in    |   2
-rw-r--r--  handlers/wget             |   6
-rw-r--r--  man/backupninja.1         |   2
-rwxr-xr-x  src/backupninja.in        |  52
25 files changed, 387 insertions, 151 deletions
diff --git a/AUTHORS b/AUTHORS
index 2a7ff46..d50e69d 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -21,7 +21,7 @@ garcondumonde@riseup.net
Martin Krafft madduck@debian.org -- admingroup patch
Anarcat -- lotsa patches
Jamie McClelland -- cstream patches
-ale -- ldap cleanup
+ale -- ldap cleanup, rdiff support for reading include/exclude from files
Sami Haahtinen <ressu@ressukka.net>
Matthew Palmer -- mysql enhancements
romain.tartiere@healthgrid.org -- ldap fixes
@@ -29,10 +29,13 @@ Adam Monsen - spec file updates
Matthew Palmer <mpalmer@debian.org> -- halt loglevel feature
dan@garthwaite.org -- reportspace bugfix
Tuomas Jormola <tj@solitudo.net> -- "when = manual" option
-Ian Beckwith <ianb@erislabs.net> -- dup bandwidthlimit fix
-Olivier Berger <oberger@ouvaton.org> -- much work on the dup handler
+Ian Beckwith <ianb@erislabs.net> -- dup bandwidthlimit fix, dup helper fix
+Olivier Berger <oberger@ouvaton.org> -- numerous contributions
stefan <s.freudenberg@jpberlin.de> -- dup support for Amazon S3 buckets
maniacmartin <martin@maniacmartin.com> -- rdiff confusing error message fix
Chris Nolan <chris@cenolan.com> -- maildir subdirectory expansion
Dan Carley -- mysql bugfix
Jordi Mallach <jordi@debian.org> -- do not error when no jobs are configured
+Jacob Anawalt <jlanawalt@gmail.com> -- pg_dump format option
+Sergio Talens-Oliag <sto@debian.org> -- pipefail fixes
+Bruno Bigras <bigras.bruno@gmail.com> -- enable tar handler in the build system
diff --git a/ChangeLog b/ChangeLog
index 972055f..f4a38f5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,7 +1,63 @@
+version 0.9.9 -- May 15, 2011
+ backupninja changes
+ . Use locking to avoid running concurrent instances of the same backup
+ action. Thanks to Olivier Berger <oberger@ouvaton.org> for the patch.
+ (Closes: #511300)
+ handler changes
+ all handlers:
+ . Stop using "local VAR" outside functions. (Closes: #530647)
+ dup:
+ . Use --tempdir option rather than TMPDIR environment variable.
+ (Closes Roundup bug #598)
+ . Remove support for duplicity < 0.4.4. Even etch-backports has
+ a newer one.
+ . Support the remove-all-inc-of-but-n-full command for duplicity >=
+ 0.9.10 to allow removal of increments of older full backups.
+ Thanks to Olivier Berger <oberger@ouvaton.org> for the patch.
+ (Closes Redmine bug #2492)
+ (Closes: #603478)
+ ldap:
+ . Fix reliance on bash for pipefail.
+ mysql:
+ . Fix reliance on bash for pipefail.
+ Thanks to Sergio Talens-Oliag <sto@debian.org> for the patch.
+ (Closes: #602374)
+ postgresql:
+ . Support various pg_dump formats in addition to pg_dumpall.
+ Thanks to Jacob Anawalt <jlanawalt@gmail.com> for the patch.
+ (Closes Roundup bug #2534)
+ . Fix reliance on bash for pipefail.
+ rdiff:
+ . Support reading include/exclude patterns from files using the
+ "include @/etc/backup_includes" syntax (Closes Roundup bug
+ #2370). Thanks to ale for the patch.
+ rsync:
+ . Fix long-term (daily/weekly/monthly) rotation.
+ . Make units clearer (Closes Redmine bug #2737)
+ . Do arithmetic using bash rather than bc (Closes: #603173)
+ sys:
+ . Fix hwinfo (Closes: #625501)
+ . Fix gathering of information about loaded modules:
+ cut is in /usr/bin, not /bin.
+ tar:
+ . Install by default. (Closes Redmine bug #2907)
+ helper changes
+ dup:
+ . Fix separate signing key usecase. Thanks to Ian Beckwith for
+ the patch.
+ . Make units clearer (Closes Redmine bug #2737)
+ rdiff:
+ . Generate 4096-bit RSA keys.
+ tar:
+ . Install by default. (Closes Redmine bug #2907)
+ documentation changes
+ . Recommend using 4096-bit RSA keys everywhere.
+
version 0.9.8.1 -- October 31, 2010 (boo!)
backupninja changes
. Do not error out when no job is configured. Thanks to Jordi Mallach
<jordi@debian.org> for the patch (Closes: #597684)
+ handler changes
sys:
. Route around broken vgcfgbackup not able to handle multiple VG arguments
diff --git a/README b/README
index 8b01add..41d4186 100644
--- a/README
+++ b/README
@@ -201,8 +201,8 @@ In order for rdiff-backup to sync files over ssh unattended, you must
create ssh keys on the source server and copy the public key to the
remote user's authorized keys file. For example:
- root@srchost# ssh-keygen -t dsa
- root@srchost# ssh-copy-id -i /root/.ssh/id_dsa.pub backup@desthost
+ root@srchost# ssh-keygen -t rsa -b 4096
+ root@srchost# ssh-copy-id -i /root/.ssh/id_rsa.pub backup@desthost
Now, you should be able to ssh from user 'root' on srchost to
user 'backup' on desthost without specifying a password.
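
A quick way to verify the unattended login before the first backup run (BatchMode makes ssh fail instead of prompting when key authentication is not in place):

   root@srchost# ssh -o BatchMode=yes backup@desthost true && echo "key auth OK"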
diff --git a/configure.in b/configure.in
index dd128c1..bc43c9a 100644
--- a/configure.in
+++ b/configure.in
@@ -3,7 +3,7 @@
# The maintainer mode is causing me grief with newest versions of autotools
#AM_MAINTAINER_MODE
-AC_INIT([backupninja],[0.9.8.1],[backupninja@lists.riseup.net])
+AC_INIT([backupninja],[0.9.9],[backupninja@lists.riseup.net])
AC_CONFIG_SRCDIR([src/backupninja.in])
AM_INIT_AUTOMAKE
diff --git a/etc/backupninja.conf.in b/etc/backupninja.conf.in
index dee9fff..7e3d347 100644
--- a/etc/backupninja.conf.in
+++ b/etc/backupninja.conf.in
@@ -83,6 +83,7 @@ vservers = no
# MYSQL=/usr/bin/mysql
# MYSQLHOTCOPY=/usr/bin/mysqlhotcopy
# MYSQLDUMP=/usr/bin/mysqldump
+# PSQL=/usr/bin/psql
# PGSQLDUMP=/usr/bin/pg_dump
# PGSQLDUMPALL=/usr/bin/pg_dumpall
# GZIP=/bin/gzip
diff --git a/examples/example.dup b/examples/example.dup
index 0ed5b2a..5e6b424 100644
--- a/examples/example.dup
+++ b/examples/example.dup
@@ -164,6 +164,14 @@ exclude = /var/cache/backupninja/duplicity
## Default:
# keep = 60
+# how many full backups to keep increments for; default is all
+# (keep every increment). increments belonging to older full backups
+# will be deleted: only those of the most recent full backups
+# (count provided) will be kept
+#
+## Default:
+# keepincroffulls = all
+
## full destination URL, in duplicity format; if set, desturl overrides
## sshoptions, destdir, desthost and destuser; it also disables testconnect and
## bandwithlimit. For details, see duplicity manpage, section "URL FORMAT", some
@@ -191,9 +199,9 @@ exclude = /var/cache/backupninja/duplicity
## Default:
# ftp_password =
-## bandwith limit, in kbit/s ; default is 0, i.e. no limit
+## bandwidth limit, in Kbit/s; default is 0, i.e. no limit
## if using 'desturl' above, 'bandwidthlimit' must not be set
-## an example setting of 128 kbps would be:
+## an example setting of 128 Kbit/s would be:
## bandwidthlimit = 128
##
## Default:
@@ -203,7 +211,7 @@ exclude = /var/cache/backupninja/duplicity
## warning: sftp does not support all scp options, especially -i; as
## a workaround, you can use "-o <SSHOPTION>"
## an example setting would be:
-## sshoptions = -o IdentityFile=/root/.ssh/id_dsa_duplicity
+## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
##
## Default:
# sshoptions =
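
For reference, a setting such as "keepincroffulls = 3" makes the dup handler run roughly the following duplicity command (a sketch; the destination URL is illustrative):

   duplicity remove-all-inc-of-but-n-full 3 --force scp://backup@desthost/backups/srchost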
diff --git a/examples/example.pgsql b/examples/example.pgsql
index 42f045e..5a4e02f 100644
--- a/examples/example.pgsql
+++ b/examples/example.pgsql
@@ -17,6 +17,19 @@
# compress = < yes | no > (default = yes)
# if yes, compress the pg_dump/pg_dumpall output.
+# format = < plain | tar | custom > (default = plain)
+# plain - Output a plain-text SQL script file with the extension .sql.
+# When dumping all databases, a single file is created via pg_dumpall.
+# tar - Output a tar archive suitable for input into pg_restore. More
+# flexible than plain and can be manipulated by standard Unix tools
+# such as tar. Creates a globals.sql file and an archive per database.
+# custom - Output a custom PostgreSQL pg_restore archive. This is the most
+# flexible format allowing selective import and reordering of database
+# objects at the time the database is restored via pg_restore. This
+# option creates a globals.sql file containing the cluster role and
+# other information dumped by pg_dumpall -g and a pg_restore file
+# per selected database. See the pg_dump and pg_restore man pages.
+
### You can also set the following variables in /etc/backupninja.conf:
# PGSQLDUMP: pg_dump path (default: /usr/bin/pg_dump)
# PGSQLDUMPALL: pg_dumpall path (default: /usr/bin/pg_dumpall)
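
Restore steps differ per format; a minimal sketch (database and file names are illustrative):

   # plain: feed the SQL script back through psql
   gunzip -c mydb.sql.gz | psql -U postgres mydb
   # tar/custom: restore globals first, then each per-database archive
   psql -U postgres -f globals.sql
   gunzip mydb.pg_dump.gz && pg_restore -U postgres -C -d postgres mydb.pg_dump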
diff --git a/examples/example.rdiff b/examples/example.rdiff
index 5adecd8..e8ce542 100644
--- a/examples/example.rdiff
+++ b/examples/example.rdiff
@@ -161,4 +161,9 @@ exclude = /var/cache/backupninja/duplicity
## Default:
# user =
-
+## passed directly to ssh
+## an example setting would be:
+## sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
+##
+## Default:
+# sshoptions =
diff --git a/examples/example.rsync b/examples/example.rsync
index 9053f3e..3c280ba 100644
--- a/examples/example.rsync
+++ b/examples/example.rsync
@@ -89,7 +89,7 @@ exclude_vserver = excluded_vserver2
# if set to 1, compress data on rsync (remote source only)
#compress = 0
-# set a badnwidth limit in kbps (remote source only)
+# set a bandwidth limit in KB/s (remote source only)
#bandwidthlimit =
# remote rsync program (remote source only)
diff --git a/handlers/Makefile.am b/handlers/Makefile.am
index eaf4b9e..c9ad2ba 100644
--- a/handlers/Makefile.am
+++ b/handlers/Makefile.am
@@ -1,7 +1,7 @@
HANDLERS = dup dup.helper ldap ldap.helper maildir makecd \
makecd.helper mysql mysql.helper pgsql pgsql.helper rdiff \
- rdiff.helper rsync sh svn sys sys.helper trac
+ rdiff.helper rsync sh svn sys sys.helper trac tar tar.helper
CLEANFILES = $(HANDLERS)
@@ -90,3 +90,11 @@ trac: $(srcdir)/trac.in
rm -f trac
$(edit) $(srcdir)/trac.in > trac
+tar: $(srcdir)/tar.in
+ rm -f tar
+ $(edit) $(srcdir)/tar.in > tar
+
+tar.helper: $(srcdir)/tar.helper.in
+ rm -f tar.helper
+ $(edit) $(srcdir)/tar.helper.in > tar.helper
+
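
The two new rules copy the existing hand-written pattern, where $(edit) is the Makefile's substitution macro, typically a sed invocation along these lines (a sketch, not the actual definition):

   edit = sed -e 's,@AWK\@,$(AWK),g' -e 's,@SED\@,$(SED),g'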
diff --git a/handlers/dup.helper.in b/handlers/dup.helper.in
index e985c5e..7f82c2f 100644
--- a/handlers/dup.helper.in
+++ b/handlers/dup.helper.in
@@ -124,6 +124,7 @@ do_dup_dest() {
formItem "keep" "$dup_keep"
formItem "incremental" "$dup_incremental"
formItem "increments" "$dup_increments"
+ formItem "keepincroffulls" "$dup_keepincroffulls"
formItem "bandwidthlimit" "$dup_bandwidth"
formItem "sshoptions" "$dup_sshoptions"
formDisplay
@@ -141,8 +142,9 @@ do_dup_dest() {
dup_keep=${thereply[3]}
dup_incremental=${thereply[4]}
dup_increments=${thereply[5]}
- dup_bandwidth=${thereply[6]}
- dup_sshoptions=${thereply[7]}
+ dup_keepincroffulls=${thereply[6]}
+ dup_bandwidth=${thereply[7]}
+ dup_sshoptions=${thereply[8]}
done
set +o noglob
@@ -179,7 +181,7 @@ do_dup_gpg_signkey() {
dup_gpg_onekeypair=no
fi
- if [ "$dup_gpg_onekeypair" == "no" }; then
+ if [ "$dup_gpg_onekeypair" == "no" ]; then
# signkey ?
REPLY=
while [ -z "$REPLY" -o -z "$dup_gpg_signkey" ]; do
@@ -421,9 +423,18 @@ increments = $dup_increments
# (you can also use the time format of duplicity)
# 'keep = yes' means : do not delete old data, the remote host will take care of this
#keep = 60
+#keep = 1Y
#keep = yes
keep = $dup_keep
+# how many full backups to keep increments for; default is all
+# (keep every increment). increments belonging to older full backups
+# will be deleted: only those of the most recent full backups
+# (count provided) will be kept
+#keepincroffulls = all
+#keepincroffulls = 6
+keepincroffulls = $dup_keepincroffulls
+
# full destination URL, in duplicity format; if set, desturl overrides
# sshoptions, destdir, desthost and destuser; it also disables testconnect and
# bandwithlimit. For details, see duplicity manpage, section "URL FORMAT".
@@ -442,14 +453,14 @@ keep = $dup_keep
# bandwith limit, in kbit/s ; default is 0, i.e. no limit
# if using 'desturl' above, 'bandwidthlimit' must not be set
-# an example setting of 128 kbps would be:
+# an example setting of 128 Kbit/s would be:
#bandwidthlimit = 128
bandwidthlimit = $dup_bandwidth
# passed directly to ssh, scp (and sftp in duplicity >=0.4.2)
# warning: sftp does not support all scp options, especially -i; as
# a workaround, you can use "-o <SSHOPTION>"
-#sshoptions = -o IdentityFile=/root/.ssh/id_dsa_duplicity
+#sshoptions = -o IdentityFile=/root/.ssh/id_rsa_duplicity
sshoptions = $dup_sshoptions
# put the backups under this destination directory
@@ -534,6 +545,7 @@ dup_wizard() {
dup_incremental=yes
dup_increments=30
dup_keep=60
+ dup_keepincroffulls=
dup_bandwidth=
dup_sshoptions=
dup_destdir="/backups/`hostname`"
diff --git a/handlers/dup.in b/handlers/dup.in
index 5216643..b58d34d 100644
--- a/handlers/dup.in
+++ b/handlers/dup.in
@@ -2,7 +2,7 @@
# vim: set filetype=sh sw=3 sts=3 expandtab autoindent:
#
# duplicity script for backupninja
-# requires duplicity
+# requires duplicity >= 0.4.4, and >= 0.4.9 when using a custom tmpdir.
#
getconf options
@@ -26,6 +26,7 @@ setsection dest
getconf incremental yes
getconf increments 30
getconf keep 60
+getconf keepincroffulls all
getconf desturl
getconf awsaccesskeyid
getconf awssecretaccesskey
@@ -51,7 +52,7 @@ fi
### VServers
# If vservers are configured, check that the ones listed in $vsnames do exist.
-local usevserver=no
+usevserver=no
if [ $vservers_are_available = yes ]; then
if [ "$vsnames" = all ]; then
vsnames="$found_vservers"
@@ -106,35 +107,14 @@ duplicity_major="`echo $duplicity_version | @AWK@ -F '.' '{print $1}'`"
duplicity_minor="`echo $duplicity_version | @AWK@ -F '.' '{print $2}'`"
duplicity_sub="`echo $duplicity_version | @AWK@ -F '.' '{print $3}'`"
-### ssh/scp/sftp options
-# 1. duplicity >= 0.4.2 needs --sftp-command
-# (NB: sftp does not support the -l option)
-# 2. duplicity 0.4.3 to 0.4.9 replace --ssh-command with --ssh-options, which is
-# passed to scp and sftp commands by duplicity. We don't use it: since this
-# version does not use the ssh command anymore, we keep compatibility with
-# our previous config files by passing $sshoptions to --scp-command and
-# --sftp-command ourselves
-
+### ssh/scp/sftp options (duplicity < 0.4.3 is unsupported)
scpoptions="$sshoptions"
if [ "$bandwidthlimit" != 0 ]; then
[ -z "$desturl" ] || warning 'The bandwidthlimit option is not used when desturl is set.'
scpoptions="$scpoptions -l $bandwidthlimit"
fi
-
-# < 0.4.2 : only uses ssh and scp
-if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 2 ]; then
- execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --ssh-command 'ssh $sshoptions'"
-# >= 0.4.2 : also uses sftp, --sftp-command option is now supported
-else
- sftpoptions="$sshoptions"
- # == 0.4.2 : uses ssh, scp and sftp
- if [ "$duplicity_major" -eq 0 -a "$duplicity_minor" -eq 4 -a "$duplicity_sub" -eq 2 ]; then
- execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions' --ssh-command 'ssh $sshoptions'"
- # >= 0.4.3 : uses only scp and sftp, --ssh-command option is not supported anymore
- else
- execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions'"
- fi
-fi
+sftpoptions="$sshoptions"
+execstr_options="${execstr_options} --scp-command 'scp $scpoptions' --sftp-command 'sftp $sftpoptions'"
### Symmetric or asymmetric (public/private key pair) encryption
if [ -n "$encryptkey" ]; then
@@ -162,12 +142,7 @@ fi
# full backup.
# If incremental==no, force a full backup anyway.
if [ "$incremental" == "no" ]; then
- # before 0.4.4, full was an option and not a command
- if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 4 ]; then
- execstr_options="${execstr_options} --full"
- else
- execstr_command="full"
- fi
+ execstr_command="full"
else
# we're in incremental mode
if [ "$increments" != "keep" ]; then
@@ -185,7 +160,6 @@ if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 6 -a "$duplicity_sub" -g
fi
### Temporary directory
-precmd=
if [ -n "$tmpdir" ]; then
if [ ! -d "$tmpdir" ]; then
info "Temporary directory ($tmpdir) does not exist, creating it."
@@ -194,7 +168,7 @@ if [ -n "$tmpdir" ]; then
chmod 0700 "$tmpdir"
fi
info "Using $tmpdir as TMPDIR"
- precmd="${precmd}TMPDIR=$tmpdir "
+ execstr_options="${execstr_options} --tempdir '$tmpdir'"
fi
### Archive directory
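
The --tempdir option keeps the temporary directory out of the environment; the resulting invocations look like this sketch (path illustrative):

   duplicity cleanup --force --tempdir '/var/backups/tmp' ...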
@@ -211,10 +185,6 @@ if [ "$keep" != "yes" ]; then
if [ "`echo $keep | tr -d 0-9`" == "" ]; then
keep="${keep}D"
fi
- # before 0.4.4, remove-older-than was an option and not a command
- if [ "$duplicity_major" -le 0 -a "$duplicity_minor" -le 4 -a "$duplicity_sub" -lt 4 ]; then
- execstr_options="${execstr_options} --remove-older-than $keep"
- fi
fi
### Source
@@ -269,56 +239,76 @@ fi
### Cleanup commands (duplicity >= 0.4.4)
# cleanup
-if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -ge 4 ]; then
- debug "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart"
+debug "duplicity cleanup --force $execstr_options $execstr_serverpart"
+if [ ! $test ]; then
+ export PASSPHRASE=$password
+ export FTP_PASSWORD=$ftp_password
+ output=`nice -n $nicelevel \
+ su -c \
+ "duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"`
+ exit_code=$?
+ if [ $exit_code -eq 0 ]; then
+ debug $output
+ info "Duplicity cleanup finished successfully."
+ else
+ debug $output
+ warning "Duplicity cleanup failed."
+ fi
+fi
+
+# remove-older-than
+if [ "$keep" != "yes" ]; then
+ debug "duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
if [ ! $test ]; then
export PASSPHRASE=$password
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
- su -c \
- "$precmd duplicity cleanup --force $execstr_options $execstr_serverpart 2>&1"`
+ su -c \
+ "duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"`
exit_code=$?
if [ $exit_code -eq 0 ]; then
debug $output
- info "Duplicity cleanup finished successfully."
+ info "Duplicity remove-older-than finished successfully."
else
debug $output
- warning "Duplicity cleanup failed."
+ warning "Duplicity remove-older-than failed."
fi
fi
fi
-# remove-older-than
+# remove-all-inc-of-but-n-full: remove increments of older full backups, keeping only the latest ones
if [ "$keep" != "yes" ]; then
- if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 4 -a "$duplicity_sub" -ge 4 ]; then
- debug "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart"
- if [ ! $test ]; then
- export PASSPHRASE=$password
- export FTP_PASSWORD=$ftp_password
- output=`nice -n $nicelevel \
- su -c \
- "$precmd duplicity remove-older-than $keep --force $execstr_options $execstr_serverpart 2>&1"`
- exit_code=$?
- if [ $exit_code -eq 0 ]; then
- debug $output
- info "Duplicity remove-older-than finished successfully."
- else
- debug $output
- warning "Duplicity remove-older-than failed."
+ if [ "$keepincroffulls" != "all" ]; then
+ if [ "$duplicity_major" -ge 0 -a "$duplicity_minor" -ge 9 -a "$duplicity_sub" -ge 10 ]; then
+ debug "$precmd duplicity remove-all-inc-of-but-n-full $keepincroffulls --force $execstr_options $execstr_serverpart"
+ if [ ! $test ]; then
+ export PASSPHRASE=$password
+ export FTP_PASSWORD=$ftp_password
+ output=`nice -n $nicelevel \
+ su -c \
+ "$precmd duplicity remove-all-inc-of-but-n-full $keepincroffulls --force $execstr_options $execstr_serverpart 2>&1"`
+ exit_code=$?
+ if [ $exit_code -eq 0 ]; then
+ debug $output
+ info "Duplicity remove-all-inc-of-but-n-full finished successfully."
+ else
+ debug $output
+ warning "Duplicity remove-all-inc-of-but-n-full failed."
+ fi
fi
fi
fi
fi
### Backup command
-debug "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart"
+debug "duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart"
if [ ! $test ]; then
outputfile=`maketemp backupout`
export PASSPHRASE=$password
export FTP_PASSWORD=$ftp_password
output=`nice -n $nicelevel \
su -c \
- "$precmd duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`
+ "duplicity $execstr_command $execstr_options $execstr_source --exclude '**' / $execstr_serverpart >$outputfile 2>&1"`
exit_code=$?
debug $output
cat $outputfile | (while read output ; do
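
One caveat on the version test added above: comparing each component with -ge misfires for releases such as 0.10.2 (sub-version 2 is not >= 10, although 0.10.2 is newer than 0.9.10). A dotted-version comparison is more robust, e.g. this sketch (version_ge is a hypothetical helper; sort -V requires GNU coreutils):

   version_ge() {
      # true if $1 >= $2 when compared as dotted version strings
      [ "$(printf '%s\n' "$1" "$2" | sort -V | head -n1)" = "$2" ]
   }
   version_ge "$duplicity_version" 0.9.10 && has_remove_all_inc=yes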
diff --git a/handlers/ldap.in b/handlers/ldap.in
index 83307ee..600f172 100644
--- a/handlers/ldap.in
+++ b/handlers/ldap.in
@@ -91,7 +91,7 @@ if [ "$ldif" == "yes" ]; then
execstr="$execstr > $dumpdir/$dbsuffix.ldif"
fi
debug "$execstr"
- output=`su root -c "set -o pipefail ; $execstr" 2>&1`
+ output=`su root -s /bin/bash -c "set -o pipefail ; $execstr" 2>&1`
code=$?
if [ "$code" == "0" ]; then
debug $output
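
The added -s /bin/bash matters because root's shell may be a plain POSIX sh (dash on Debian), where pipefail does not exist; a quick demonstration:

   sh -c 'set -o pipefail'                          # dash: set: Illegal option -o pipefail
   bash -c 'set -o pipefail; false | cat'; echo $?  # prints 1 instead of 0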
diff --git a/handlers/mysql.in b/handlers/mysql.in
index 0282046..185a98a 100644
--- a/handlers/mysql.in
+++ b/handlers/mysql.in
@@ -24,8 +24,8 @@ getconf configfile /etc/mysql/debian.cnf
# Decide if the handler should operate on a vserver or on the host.
# In the former case, check that $vsname exists and is running.
-local usevserver=no
-local vroot
+usevserver=no
+vroot=''
if [ $vservers_are_available = yes ]; then
if [ -n "$vsname" ]; then
# does it exist ?
@@ -303,7 +303,7 @@ then
debug "su $user -c \"$execstr\""
if [ ! $test ]
then
- output=`su $user -c "set -o pipefail ; $execstr" 2>&1`
+ output=`su $user -s /bin/bash -c "set -o pipefail ; $execstr" 2>&1`
code=$?
if [ "$code" == "0" ]
then
diff --git a/handlers/pgsql.helper.in b/handlers/pgsql.helper.in
index ff1cfd4..82e6b48 100644
--- a/handlers/pgsql.helper.in
+++ b/handlers/pgsql.helper.in
@@ -67,6 +67,21 @@ pgsql_wizard() {
pgsql_compress="compress = no"
fi
+ # pg_dump format; defaults to plain, though custom is recommended by PostgreSQL
+ menuBox "$pgsql_title" "Choose a pg_dump format:" \
+ plain "Default plain-text sql script, use with psql." \
+ tar "More flexible than the plain, use with pg_restore." \
+ custom "The most flexible format, use with pg_restore."
+ if [ $? = 0 ]; then
+ result="$REPLY"
+ case "$result" in
+ "tar") pgsql_format="format = tar";;
+ "custom") pgsql_format="format = custom";;
+ *) pgsql_format = "format = plain";;
+ esac
+ fi
+
+
# write config file
get_next_filename $configdirectory/20.pgsql
cat >> $next_filename <<EOF
@@ -97,7 +112,22 @@ $pgsql_databases
# if yes, compress the pg_dump/pg_dumpall output.
$pgsql_compress
+# format = < plain | tar | custom > (default = plain)
+# plain - Output a plain-text SQL script file with the extension .sql.
+# When dumping all databases, a single file is created via pg_dumpall.
+# tar - Output a tar archive suitable for input into pg_restore. More
+# flexible than plain and can be manipulated by standard Unix tools
+# such as tar. Creates a globals.sql file and an archive per database.
+# custom - Output a custom PostgreSQL pg_restore archive. This is the most
+# flexible format allowing selective import and reordering of database
+# objects at the time the database is restored via pg_restore. This
+# option creates a globals.sql file containing the cluster role and
+# other information dumped by pg_dumpall -g and a pg_restore file
+# per selected database. See the pg_dump and pg_restore man pages.
+$pgsql_format
+
### You can also set the following variables in backupninja.conf:
+# PSQL: psql path (default: /usr/bin/psql)
# PGSQLDUMP: pg_dump path (default: /usr/bin/pg_dump)
# PGSQLDUMPALL: pg_dumpall path (default: /usr/bin/pg_dumpall)
# PGSQLUSER: user running PostgreSQL (default: postgres)
diff --git a/handlers/pgsql.in b/handlers/pgsql.in
index 0b7badf..a50d3c7 100644
--- a/handlers/pgsql.in
+++ b/handlers/pgsql.in
@@ -8,13 +8,15 @@ getconf backupdir /var/backups/postgres
getconf databases all
getconf compress yes
getconf vsname
+# format maps to pg_dump --format= option, old/default was plain
+getconf format plain
localhost=`hostname`
# Decide if the handler should operate on a vserver or on the host.
# In the former case, check that $vsname exists and is running.
-local usevserver=no
-local vroot
+usevserver=no
+vroot=''
if [ $vservers_are_available = yes ]; then
if [ -n "$vsname" ]; then
# does it exist ?
@@ -35,17 +37,31 @@ fi
# Make sure that the system to backup has the needed executables
if [ $usevserver = yes ]; then
debug "Examining vserver '$vsname'."
- if [ "$databases" == "all" ]; then
+ if [ "$databases" == "all" ] && [ "$format" = "plain" ]; then
[ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \
fatal "Can't find $PGSQLDUMPALL in vserver $vsname."
+ elif [ "$format" != "plain" ]; then
+ [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMPALL`" ] || \
+ fatal "Can't find $PGSQLDUMPALL in vserver $vsname."
+ [ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \
+ fatal "Can't find $PGSQLDUMP in vserver $vsname."
+ [ -x "$vroot`$VSERVER $vsname exec which $PSQL`" ] || \
+ fatal "Can't find $PSQL in vserver $vsname."
else
[ -x "$vroot`$VSERVER $vsname exec which $PGSQLDUMP`" ] || \
fatal "Can't find $PGSQLDUMP in vserver $vsname."
fi
else
- if [ "$databases" == "all" ]; then
+ if [ "$databases" == "all" ] && [ "$format" = "plain" ]; then
+ [ -x "`which $PGSQLDUMPALL`" ] || \
+ fatal "Can't find $PGSQLDUMPALL."
+ elif [ "$format" != "plain" ]; then
[ -x "`which $PGSQLDUMPALL`" ] || \
fatal "Can't find $PGSQLDUMPALL."
+ [ -x "`which $PGSQLDUMP`" ] || \
+ fatal "Can't find $PGSQLDUMP."
+ [ -x "`which $PSQL`" ] || \
+ fatal "Can't find $PSQL."
else
[ -x "`which $PGSQLDUMP`" ] || \
fatal "Can't find $PGSQLDUMP."
@@ -71,17 +87,41 @@ chown $pguid $vroot$backupdir
debug "chmod 700 $vroot$backupdir"
chmod 700 $vroot$backupdir
+
+# If we are using the custom (best) or tar pg_dump format, and
+# dumping "all" databases, we will substitute "all" for a list
+# of all non-template databases to avoid the use of pg_dumpall.
+dumpglobals="no"
+if [ "$databases" = "all" ] && [ "$format" != "plain" ]; then
+ cmdprefix=""
+ if [ "$usevserver" = "yes" ]; then
+ cmdprefix="$VSERVER $vsname exec "
+ fi
+ execstr="${cmdprefix} su - $PGSQLUSER -c 'psql -AtU $PGSQLUSER -c \"SELECT datname FROM pg_database WHERE NOT datistemplate\"'"
+ debug execstr
+ dblist=""
+ for db in $(eval $execstr 2>&1); do
+ dblist="$dblist $db"
+ done
+ if [ "$dblist" != "" ]; then
+ databases="$dblist"
+ fi
+ # Dump globals (pg_dumpall -g) for roles and tablespaces
+ dumpglobals="yes"
+fi
+
+
# if $databases = all, use pg_dumpall
if [ "$databases" == "all" ]; then
if [ $usevserver = yes ]; then
if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\""
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${vsname}.sql.gz'\""
else
execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${vsname}.sql'\""
fi
else
if [ "$compress" == "yes" ]; then
- execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\""
+ execstr="su - $PGSQLUSER -s /bin/bash -c \"set -o pipefail ; $PGSQLDUMPALL | $GZIP $GZIP_OPTS > '$backupdir/${localhost}-all.sql.gz'\""
else
execstr="su - $PGSQLUSER -c \"$PGSQLDUMPALL > '$backupdir/${localhost}-all.sql'\""
fi
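
In the list-building block above, psql's -At flags produce unaligned, tuples-only output, one database name per line, which is exactly what the for loop consumes; for example (output illustrative):

   $ psql -AtU postgres -c "SELECT datname FROM pg_database WHERE NOT datistemplate"
   postgres
   mydb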
@@ -101,20 +141,58 @@ if [ "$databases" == "all" ]; then
# else use pg_dump on each specified database
else
- for db in $databases; do
+ # If we're not doing plain format, database=all may now be database=list
+ # so we track the database=all selection in dumpglobals which tells us
+ # to also dump the roles and tablespaces via pg_dumpall -g
+ if [ "$dumpglobals" = "yes" ]; then
+ globalscmd=""
+ if [ "$compress" == "yes" ]; then
+ globalscmd="set -o pipefail ; $PGSQLDUMPALL -g | $GZIP $GZIP_OPTS > '$backupdir/globals.sql.gz'"
+ else
+ globalscmd="$PGSQLDUMPALL -g > '$backupdir/globals.sql'"
+ fi
if [ $usevserver = yes ]; then
- if [ "$compress" == "yes" ]; then
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\""
- else
- execstr="$VSERVER $vsname exec su - $PGSQLUSER -c \"$PGSQLDUMP $db | > '$backupdir/${db}.sql'\""
- fi
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"$globalscmd\""
else
- if [ "$compress" == "yes" ]; then
- execstr="su - $PGSQLUSER -c \"set -o pipefail ; $PGSQLDUMP $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.sql.gz'\""
+ execstr="su - $PGSQLUSER -s /bin/bash -c \"$globalscmd\""
+ fi
+ debug "$execstr"
+ if [ ! $test ]; then
+ output=`eval $execstr 2>&1`
+ code=$?
+ if [ "$code" == "0" ]; then
+ debug $output
+ info "Successfully finished pgsql globals (roles and tablespaces) dump"
else
- execstr="su - $PGSQLUSER -c \"$PGSQLDUMP $db > '$backupdir/${db}.sql'\""
+ warning $output
+ warning "Failed to dump pgsql globals (roles and tablespaces)"
fi
fi
+ fi
+ for db in $databases; do
+ dumpext="sql"
+ if [ "$format" != "plain" ]; then
+ dumpext="pg_dump"
+ fi
+ # To better support the backupninja global GZIP and rsync-friendly GZIP_OPTS
+ # the custom archive format is told to disable compression. The plain format
+ # is uncompressed by default and the tar format doesn't support pg_dump compression.
+ disablecustomcompress=""
+ if [ "$format" = "custom" ]; then
+ disablecustomcompress="--compress=0"
+ fi
+ dumpcmd=""
+ globalscmd=""
+ if [ "$compress" == "yes" ]; then
+ dumpcmd="set -o pipefail ; $PGSQLDUMP --format=$format ${disablecustomcompress} $db | $GZIP $GZIP_OPTS > '$backupdir/${db}.${dumpext}.gz'"
+ else
+ dumpcmd="$PGSQLDUMP --format=$format ${disablecustomcompress} $db | > '$backupdir/${db}.${dumpext}'"
+ fi
+ if [ $usevserver = yes ]; then
+ execstr="$VSERVER $vsname exec su - $PGSQLUSER -s /bin/bash -c \"$dumpcmd\""
+ else
+ execstr="su - $PGSQLUSER -s /bin/bash -c \"$dumpcmd\""
+ fi
debug "$execstr"
if [ ! $test ]; then
output=`eval $execstr 2>&1`
diff --git a/handlers/rdiff.helper.in b/handlers/rdiff.helper.in
index b5bb8bb..039799e 100644
--- a/handlers/rdiff.helper.in
+++ b/handlers/rdiff.helper.in
@@ -168,7 +168,7 @@ do_rdiff_ssh_con() {
if [ ! -f /root/.ssh/id_dsa.pub -a ! -f /root/.ssh/id_rsa.pub ]; then
echo "Creating local root's ssh key"
- ssh-keygen -t dsa -f /root/.ssh/id_dsa -N ""
+ ssh-keygen -t rsa -b 4096 -f /root/.ssh/id_rsa -N ""
echo "Done. hit return to continue"
read
fi
diff --git a/handlers/rdiff.in b/handlers/rdiff.in
index 60386fa..c2f5aa0 100644
--- a/handlers/rdiff.in
+++ b/handlers/rdiff.in
@@ -115,7 +115,7 @@ fi
### CHECK CONFIG ###
# If vservers are configured, check that the ones listed in $vsnames do exist.
-local usevserver=no
+usevserver=no
if [ $vservers_are_available = yes ]; then
if [ "$vsnames" = all ]; then
vsnames="$found_vservers"
@@ -219,7 +219,10 @@ SAVEIFS=$IFS
IFS=$(echo -en "\n\b")
for i in $exclude; do
str="${i//__star__/*}"
- execstr="${execstr}--exclude '$str' "
+ case "$str" in
+ @*) execstr="${execstr}--exclude-globbing-filelist '${str#@}' " ;;
+ *) execstr="${execstr}--exclude '$str' " ;;
+ esac
done
IFS=$SAVEIFS
# includes
@@ -228,7 +231,10 @@ IFS=$(echo -en "\n\b")
for i in $include; do
[ "$i" != "/" ] || fatal "Sorry, you cannot use 'include = /'"
str="${i//__star__/*}"
- execstr="${execstr}--include '$str' "
+ case "$str" in
+ @*) execstr="${execstr}--include-globbing-filelist '${str#@}' " ;;
+ *) execstr="${execstr}--include '$str' " ;;
+ esac
done
IFS=$SAVEIFS
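
With this change an rdiff action config can use entries like the following (paths illustrative); each file is handed to rdiff-backup as --include-globbing-filelist / --exclude-globbing-filelist:

   include = @/etc/backup_includes
   exclude = @/etc/backup_excludes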
diff --git a/handlers/rsync.in b/handlers/rsync.in
index fea7e7b..d93411a 100644
--- a/handlers/rsync.in
+++ b/handlers/rsync.in
@@ -60,7 +60,7 @@
# exclude_vserver = vserver-name (valid only if vservers = yes on backupninja.conf)
# numericids = when set to 1, use numeric ids instead of user/group mappings on rsync
# compress = if set to 1, compress data on rsync (remote source only)
-# bandwidthlimit = set a badnwidth limit in kbps (remote source only)
+# bandwidthlimit = set a bandwidth limit in KB/s (remote source only)
# remote_rsync = remote rsync program (remote source only)
# id_file = ssh key file (remote source only)
# batch = set to "yes" to rsync use a batch file as source
@@ -79,7 +79,7 @@
# port = remote port number (remote destination only)
# user = remote user name (remote destination only)
# id_file = ssh key file (remote destination only)
-# bandwidthlimit = set a badnwidth limit in kbps (remote destination only)
+# bandwidthlimit = set a bandwidth limit in KB/s (remote destination only)
# remote_rsync = remote rsync program (remote dest only)
# batch = set to "yes" to rsync write a batch file from the changes
# batchbase = folder where the batch file should be written
@@ -253,7 +253,7 @@ function eval_config {
if [ -z "$days" ]; then
keep="4"
else
- keep="`echo $days - 1 | bc -l`"
+ keep=$[$days - 1]
fi
fi
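
The $[ ... ] form used here is bash's obsolete arithmetic syntax; it works, but the POSIX equivalent would be:

   keep=$(( days - 1 ))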
@@ -302,9 +302,9 @@ function rotate_short {
$nice $mv /$folder.$keep /$folder.tmp
fi
- for ((n=`echo "$keep - 1" | bc`; n >= 0; n--)); do
+ for ((n=$[$keep - 1]; n >= 0; n--)); do
if [ -d $folder.$n ]; then
- dest=`echo "$n + 1" | bc`
+ dest=$[$n + 1]
$nice $mv /$folder.$n /$folder.$dest
$touch /$folder.$dest
mkdir -p $metadata/`basename $folder`.$dest
@@ -384,14 +384,14 @@ function rotate_long {
for rottype in daily weekly monthly; do
seconds=$((seconds_${rottype}))
-
dir="$backuproot/$rottype"
- metadata="$backuproot/metadata/$rottype.1"
- mkdir -p $metadata
+ metadata="$backuproot/metadata/$rottype"
+
+ mkdir -p $metadata.1
if [ ! -d $dir.1 ]; then
echo "Debug: $dir.1 does not exist, skipping."
continue 1
- elif [ ! -f $metadata/created ] && [ ! -f $metadata/rotated ]; then
+ elif [ ! -f $metadata.1/created ] && [ ! -f $metadata.1/rotated ]; then
echo "Warning: metadata does not exist for $dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
@@ -401,10 +401,10 @@ function rotate_long {
[ "$oldest" == "" ] && oldest=0
for (( i=$oldest; i > 0; i-- )); do
if [ -d $dir.$i ]; then
- if [ -f $metadata/created ]; then
- created=`tail -1 $metadata/created`
- elif [ -f $metadata/rotated ]; then
- created=`tail -1 $metadata/rotated`
+ if [ -f $metadata.$i/created ]; then
+ created=`tail -1 $metadata.$i/created`
+ elif [ -f $metadata.$i/rotated ]; then
+ created=`tail -1 $metadata.$i/rotated`
else
created=0
fi
@@ -414,8 +414,8 @@ function rotate_long {
if [ ! -d $dir.$next ]; then
echo "Debug: $rottype.$i --> $rottype.$next"
$nice mv $dir.$i $dir.$next
- mkdir -p $backuproot/metadata/$rottype.$next
- date +%c%n%s > $backuproot/metadata/$rottype.$next/rotated
+ mkdir -p $metadata.$next
+ date +%c%n%s > $metadata.$next/rotated
else
echo "Debug: skipping rotation of $dir.$i because $dir.$next already exists."
fi
@@ -485,14 +485,14 @@ function rotate_long_remote {
for rottype in daily weekly monthly; do
seconds=\$((seconds_\${rottype}))
-
dir="$backuproot/\$rottype"
- metadata="$backuproot/metadata/\$rottype.1"
- mkdir -p \$metadata
+ metadata="$backuproot/metadata/\$rottype"
+
+ mkdir -p \$metadata.1
if [ ! -d \$dir.1 ]; then
echo "Debug: \$dir.1 does not exist, skipping."
continue 1
- elif [ ! -f \$metadata/created ] && [ ! -f \$metadata/rotated ]; then
+ elif [ ! -f \$metadata.1/created ] && [ ! -f \$metadata.1/rotated ]; then
echo "Warning: metadata does not exist for \$dir.1. This backup may be only partially completed. Skipping rotation."
continue 1
fi
@@ -502,10 +502,10 @@ function rotate_long_remote {
[ "\$oldest" == "" ] && oldest=0
for (( i=\$oldest; i > 0; i-- )); do
if [ -d \$dir.\$i ]; then
- if [ -f \$metadata/created ]; then
- created=\`tail -1 \$metadata/created\`
- elif [ -f \$metadata/rotated ]; then
- created=\`tail -1 \$metadata/rotated\`
+ if [ -f \$metadata.\$i/created ]; then
+ created=\`tail -1 \$metadata.\$i/created\`
+ elif [ -f \$metadata.\$i/rotated ]; then
+ created=\`tail -1 \$metadata.\$i/rotated\`
else
created=0
fi
@@ -515,8 +515,8 @@ function rotate_long_remote {
if [ ! -d \$dir.\$next ]; then
echo "Debug: \$rottype.\$i --> \$rottype.\$next"
$nice mv \$dir.\$i \$dir.\$next
- mkdir -p $backuproot/metadata/\$rottype.\$next
- date +%c%n%s > $backuproot/metadata/\$rottype.\$next/rotated
+ mkdir -p \$metadata.\$next
+ date +%c%n%s > \$metadata.\$next/rotated
else
echo "Debug: skipping rotation of \$dir.\$i because \$dir.\$next already exists."
fi
@@ -528,7 +528,7 @@ function rotate_long_remote {
done
max=\$((keepdaily+1))
- if [ \$keepweekly -gt 0 -a -d $backuproot/daily.\$max -a ! -d \$backuproot/weekly.1 ]; then
+ if [ \$keepweekly -gt 0 -a -d $backuproot/daily.\$max -a ! -d $backuproot/weekly.1 ]; then
echo "Debug: daily.\$max --> weekly.1"
$nice mv $backuproot/daily.\$max $backuproot/weekly.1
mkdir -p $backuproot/metadata/weekly.1
diff --git a/handlers/svn.in b/handlers/svn.in
index 5e5531a..bb70eee 100644
--- a/handlers/svn.in
+++ b/handlers/svn.in
@@ -14,8 +14,8 @@ error=0
# Decide if the handler should operate on a vserver or on the host.
# In the former case, check that $vsname exists and is running.
-local usevserver=no
-local vroot
+usevserver=no
+vroot=''
if [ $vservers_are_available = yes ]; then
if [ -n "$vsname" ]; then
# does it exist ?
diff --git a/handlers/sys.in b/handlers/sys.in
index fcf3e31..74133a3 100755
--- a/handlers/sys.in
+++ b/handlers/sys.in
@@ -103,7 +103,7 @@ getconf lvm no
getconf vsnames all
# If vservers are configured, check that the ones listed in $vsnames are running.
-local usevserver=no
+usevserver=no
if [ $vservers_are_available = yes ]; then
if [ "$vsnames" = all ]; then
vsnames="$found_vservers"
@@ -350,7 +350,7 @@ STATUS="Getting kernel version:"
catifexec "/bin/uname" "-a"
STATUS="Checking module information:"
catifexec "/sbin/lsmod"
-for x in $(/sbin/lsmod | /bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
+for x in $(/sbin/lsmod | /usr/bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null
) ; do
STATUS="Checking module information $x:"
catifexec "/sbin/modinfo" "$x"
@@ -542,7 +542,7 @@ fi
#
if [ "$hardware" == "yes" ]; then
- if [ "dohwinfo" == "yes" ]; then
+ if [ "$dohwinfo" == "yes" ]; then
if [ -f $hardwarefile ]; then
rm $hardwarefile
fi
diff --git a/handlers/tar.helper.in b/handlers/tar.helper.in
index cdbe03a..4a483be 100644
--- a/handlers/tar.helper.in
+++ b/handlers/tar.helper.in
@@ -17,7 +17,7 @@ tar_wizard() {
tar_backupname="backupname = $REPLY"
backupname="$REPLY"
- inputBox "$tar_title" "Directory where to store the backups" "/net/backups/$backupname"
+ inputBox "$tar_title" "Directory where to store the backups" "/var/backups/tar/$backupname"
[ $? = 1 ] && return
tar_backupdir="backupdir = $REPLY"
diff --git a/handlers/wget b/handlers/wget
index ebb391e..67425fc 100644
--- a/handlers/wget
+++ b/handlers/wget
@@ -88,9 +88,9 @@ function rotate {
$nice $mv /$1.$2 /$1.tmp
fi
- for ((n=`echo "$2 - 1" | bc`; n >= 0; n--)); do
+ for ((n=$[$2 - 1]; n >= 0; n--)); do
if [ -d $1.$n ]; then
- dest=`echo "$n + 1" | bc`
+ dest=$[$n + 1]
$nice $mv /$1.$n /$1.$dest
$touch /$1.$dest
fi
@@ -128,7 +128,7 @@ fi
if [ -z "$days" ]; then
keep="4"
else
- keep="`echo $days - 1 | bc -l`"
+ keep=$[$days - 1]
fi
# lockfile setup
diff --git a/man/backupninja.1 b/man/backupninja.1
index 5622881..4363222 100644
--- a/man/backupninja.1
+++ b/man/backupninja.1
@@ -104,7 +104,7 @@ Then, vital parts of the file system, including /var/backups, are nightly pushed
.TP
In order for this to work (ie for diff-backup to run unattended), you must create ssh keys on the source server and copy the public key to the remote user's authorized keys file. For example:
.br
-root@srchost# ssh-keygen -t dsa
+root@srchost# ssh-keygen -t rsa -b 4096
.br
-root@srchost# ssh-copy-id -i /root/.ssh/id_dsa.pub backup@desthost
+root@srchost# ssh-copy-id -i /root/.ssh/id_rsa.pub backup@desthost
.TP
diff --git a/src/backupninja.in b/src/backupninja.in
index 789debd..f6a5b0e 100755
--- a/src/backupninja.in
+++ b/src/backupninja.in
@@ -313,23 +313,47 @@ function process_action() {
debug $run
[ "$run" == "no" ] && return
- let "actions_run += 1"
+ # Prepare for lock creation
+ if [ ! -d /var/lock/backupninja ]; then
+ mkdir /var/lock/backupninja
+ fi
+ lockfile=`echo $file | @SED@ 's,/,_,g'`
+ lockfile=/var/lock/backupninja/$lockfile
- # call the handler:
local bufferfile=`maketemp backupninja.buffer`
echo "" > $bufferfile
- echo_debug_msg=1
+
+ # start locked section : avoid concurrent execution of the same backup
+ # uses a construct specific to shell scripts with flock. See man flock for details
(
- . $scriptdirectory/$suffix $file
- ) 2>&1 | (
- while read a; do
- echo $a >> $bufferfile
- [ $debug ] && colorize "$a"
- done
- )
- retcode=$?
- # ^^^^^^^^ we have a problem! we can't grab the return code "$?". grrr.
- echo_debug_msg=0
+ debug "executing handler in locked section controlled by $lockfile"
+ flock -x -w 5 200
+ # if all is good, we acquired the lock
+ if [ $? -eq 0 ]; then
+
+ let "actions_run += 1"
+
+ # call the handler:
+ echo_debug_msg=1
+ (
+ . $scriptdirectory/$suffix $file
+ ) 2>&1 | (
+ while read a; do
+ echo $a >> $bufferfile
+ [ $debug ] && colorize "$a"
+ done
+ )
+ retcode=$?
+ # ^^^^^^^^ we have a problem! we can't grab the return code "$?". grrr.
+ echo_debug_msg=0
+
+ else
+ # a backup is probably ongoing already, so display an error message
+ debug "failed to acquire lock"
+ echo "Fatal: Could not acquire lock $lockfile. A backup is probably already running for $file." >>$bufferfile
+ fi
+ ) 200> $lockfile
+ # end of locked section
_warnings=`cat $bufferfile | grep "^Warning: " | wc -l`
_errors=`cat $bufferfile | grep "^Error: " | wc -l`
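
Stripped of the handler plumbing, the locking added above is the standard flock-on-a-subshell idiom (a sketch; lock name illustrative):

   (
      flock -x -w 5 200 || exit 1   # wait up to 5 seconds for an exclusive lock on fd 200
      # ... critical section: run the backup action ...
   ) 200> /var/lock/backupninja/example.lock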
@@ -339,6 +363,7 @@ function process_action() {
ret=`grep "\(^Info: \|^Warning: \|^Error: \|^Fatal: \|Halt: \)" $bufferfile`
rm $bufferfile
+
if [ $_halts != 0 ]; then
msg "*halt* -- $file"
errormsg="$errormsg\n== halt request from $file==\n\n$ret\n"
@@ -476,6 +501,7 @@ getconf MYSQLADMIN /usr/bin/mysqladmin
getconf MYSQL /usr/bin/mysql
getconf MYSQLHOTCOPY /usr/bin/mysqlhotcopy
getconf MYSQLDUMP /usr/bin/mysqldump
+getconf PSQL /usr/bin/psql
getconf PGSQLDUMP /usr/bin/pg_dump
getconf PGSQLDUMPALL /usr/bin/pg_dumpall
getconf PGSQLUSER postgres
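
Like the other tool paths above, PSQL can be overridden in /etc/backupninja.conf when the PostgreSQL binaries live outside /usr/bin (path illustrative):

   PSQL=/usr/lib/postgresql/8.4/bin/psql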