#!/bin/bash
#
-# WizBackup 2.0 - Simple rsync backup
+# WizBackup 2.0 - Simple rsync backup with snapshots
# Based on incremental-backup 0.1 by Matteo Mattei
#
# Copyright 2006 Matteo Mattei <matteo.mattei@gmail.com>
-# Copyright 2007, 2008, 2009, 2010, 2011 Bernie Innocenti <bernie@codewiz.org>
+# Copyright 2007, 2008, 2009, 2010, 2011, 2012 Bernie Innocenti <bernie@codewiz.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
-
# Require at least SOURCE and DEST; any extra arguments are forwarded
# verbatim to rsync.  Usage errors go to stderr, not stdout.
if [ $# -lt 2 ]; then
	echo "Usage: $0 SOURCE DEST [RSYNC_OPTS]" >&2
	exit 1
fi

# Treat undefined variables as errors
set -u
#####################################################################
# CONFIGURATION
#####################################################################

# Source rsync URL
SRC=$1; shift
# Destination directory (will be created if it doesn't exist)
DEST=$1; shift

# Optional site-wide configuration; when present it may override any of
# the defaults assigned below.
CONF_FILE="/etc/wizbackup/wizbackup.conf"

# NOTE: --timeout needs to be large enough: if a large dir tree doesn't
#       change, a long time can pass without any I/O on the connection.
# NOTE: --inplace will clobber linked files in older snapshots. DON'T USE IT!
# $* (not $@) is used on purpose: inside a quoted string assignment the
# extra arguments must collapse into one word, and $* makes that explicit.
RSYNC_OPTS="-HAXa --stats --timeout 1800 --numeric-ids --delete --delete-excluded --ignore-errors $*"
# Number of saved snapshots
SNAPSHOTS=45
# Abort backup if the destination volume has less than these GBs free
MIN_FREE_GB=10

RESULT=500
DATE=$(date +"%Y%m%d")
# Strip one trailing slash so snapshot paths don't get a double slash.
DEST="${DEST%/}"
if [ -f "$CONF_FILE" ]; then
	source "$CONF_FILE"
fi
# Use "backup" ssh key with ssh protocol, or password file for rsync protocol
if [ "${SRC%:*}" == "rsync" ]; then
do_prune()
{
	# Keep only the newest $SNAPSHOTS snapshots in the current directory
	# (the caller is expected to have cd'd into $DEST first).  The
	# in-progress "tmp" snapshot is never considered for pruning.
	# Snapshot names are YYYYMMDD dates, so lexical ls order is
	# chronological order and the names contain no whitespace.
	local old
	old=$(ls | grep -v tmp | head -n -$SNAPSHOTS | tr '\n' ' ')
	if [ -n "$old" ]; then
		echo "$(date): Removing oldest snapshot(s): $old..."
		# $old is intentionally unquoted: it holds a space-separated
		# list of snapshot directory names.
		rm -rf $old || exit 669
	fi
}
do_link()
{
# NOTE(review): this function still contains unresolved diff residue
# (the '-'/'+' prefixed lines below), and a hunk appears to be missing:
# as written, the [ -z "$newest" ] branch would "link" an empty snapshot
# name.  Presumably the lost branch created the first snapshot and the
# linking code belonged to an else arm -- TODO confirm against the
# upstream wizbackup source before running this.
- local newest=`ls | grep -v tmp | tail -n 1`
+ local newest=`ls | grep -v tmp | tail -n 1`
# Resume an interrupted backup rather than starting a new snapshot.
if [ -d "$DEST/tmp" ]; then
echo "$(date): Continuing with pre-existing snapshot $DEST/tmp"
elif [ -z "$newest" ]; then
echo "$(date): Linking snapshot $DEST/$newest to $DEST/tmp"
# TODO: Creating the hardlinks takes a lot of time.
# Perhaps we could save time by recycling the oldest snapshot
- cp -lR "$DEST/$newest" "$DEST/tmp" || exit 670
# cp -la hard-links the previous snapshot into tmp; on failure the
# partial tmp tree is removed so a later run can start cleanly.
+ cp -la "$DEST/$newest" "$DEST/tmp"
+ RESULT=$?
+ if [ $RESULT -ne 0 ]; then
+ echo "$(date): Failed to setup tmp snapshot: $RESULT. Cleaning up."
+ rm -rf "$DEST/tmp"
+ exit $RESULT
+ fi
fi
}
do_test()
{
# NOTE(review): unresolved diff residue ('+' prefixed lines) and an
# orphan "fi" near the bottom: its matching "if" -- apparently an rsync
# connectivity probe, judging by the surviving comments -- was lost in a
# dropped hunk.  This function will not parse until reconciled with the
# upstream source.
+ # TODO: test for free space and free inodes in the $DEST filesystem
# stat --file-system (GNU coreutils, Linux-only): %S = fundamental
# block size, %f = free blocks, %d = free inodes.
+ block_size=`stat --file-system --format "%S" "$DEST"`
+ free_blocks=`stat --file-system --format "%f" "$DEST"`
# NOTE(review): free_inodes is collected but never checked below.
+ free_inodes=`stat --file-system --format "%d" "$DEST"`
+ free_gb=$((block_size * free_blocks / 1024 / 1024 / 1024))
+
# Refuse to start a snapshot on a nearly-full destination volume.
+ if [ "$free_gb" -lt "$MIN_FREE_GB" ]; then
+ echo "$(date): Aborting due to insufficient free space ${free_gb}GB."
+ exit 670
+ fi
+
# Avoid clobbering the latest snapshot if the remote host does
# not allow us to connect
# --contimeout: sometimes hangs on connection...
fi
}
######################################
# Main
######################################

# Backups preserve ownership, ACLs and xattrs (rsync -HAXa), so this
# only works meaningfully as root.
if (( $(id -u) != 0 )); then
	echo "Sorry, must be root. Exiting..." >&2
	# Bug fix: a bare "exit" here returned the status of the echo (0),
	# reporting success to the caller on a refused run.
	exit 1
fi

echo "$(date): BEGIN backup: $0 $@"
echo "$(date): $0 $SRC $DEST $@"

do_init
do_prune
do_test