#
# Borgend Backup instance
#

import config
import logging
import time
import keyring
import borgend
from instance import BorgInstance
from queue import Queue
from threading import Thread, Lock, Timer

logger=borgend.logger.getChild(__name__)

loglevel_translation={
    'CRITICAL': logging.CRITICAL,
    'ERROR': logging.ERROR,
    'WARNING': logging.WARNING,
    'DEBUG': logging.DEBUG,
    'INFO': logging.INFO
}

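# Map a Borg log level name (e.g. 'WARNING') to the corresponding logging
# module constant; unrecognised names are treated as ERROR.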
def translate_loglevel(x):
    if x in loglevel_translation:
        return loglevel_translation[x]
    else:
        return logging.ERROR

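# Return t[x] if the key exists and its value is an int, otherwise None.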
def safe_get_int(t, x):
    if x in t:
        tmp=t[x]
        if isinstance(tmp, int):
            return tmp
    return None


class Backup:

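    # Read and validate this backup target's settings from its section of the
    # configuration file.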
    def __decode_config(self, cfg):
        loc0='backup target %d' % self.identifier

        self.name=config.check_string(cfg, 'name', 'Name', loc0)

        self.loc='backup target "%s"' % self.name

        self.repository=config.check_string(cfg, 'repository',
                                            'Target repository', self.loc)

        self.archive_prefix=config.check_string(cfg, 'archive_prefix',
                                                'Archive prefix', self.loc)

        self.archive_template=config.check_string(cfg, 'archive_template',
                                                  'Archive template', self.loc)

        self.backup_interval=config.check_nonneg_int(cfg, 'backup_interval',
                                                     'Backup interval', self.loc,
                                                     config.defaults['backup_interval'])

        self.retry_interval=config.check_nonneg_int(cfg, 'retry_interval',
                                                    'Retry interval', self.loc,
                                                    config.defaults['retry_interval'])

        self.paths=config.check_nonempty_list_of_strings(cfg, 'paths', 'Paths', self.loc)

        self.common_parameters=config.check_list_of_dicts(cfg, 'common_parameters',
                                                         'Borg parameters', self.loc,
                                                         default=[])

        self.create_parameters=config.check_list_of_dicts(cfg, 'create_parameters',
                                                         'Create parameters', self.loc,
                                                         default=[])

        self.prune_parameters=config.check_list_of_dicts(cfg, 'prune_parameters',
                                                         'Prune parameters', self.loc,
                                                         default=[])

        self.__keychain_account=config.check_string(cfg, 'keychain_account',
                                                    'Keychain account', self.loc,
                                                    default='')

        self.__passphrase=None

        if config.settings['extract_passphrases_at_startup']:
            try:
                self.extract_passphrase()
            except Exception:
                pass

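    # Look up the repository passphrase in the system keyring under the
    # "borg-backup" service and the configured keychain_account, caching it
    # after the first successful lookup.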
    def extract_passphrase(self):
        acc=self.__keychain_account
        if not self.__passphrase:
            if acc and acc!='':
                logger.debug('Requesting passphrase')
                try:
                    pw=keyring.get_password("borg-backup", acc)
                except Exception as err:
                    logger.error('Failed to retrieve password: %s' % str(err))
                    raise err
                else:
                    logger.debug('Received passphrase')
                self.__passphrase=pw
            else:
                self.__passphrase=None
        return self.__passphrase

    def __init__(self, identifier, cfg):
        self.identifier=identifier

        self.__decode_config(cfg)

        self.config=config
        self.lastrun_when=None
        self.lastrun_success=None
        self.borg_instance=None
        self.current_operation=None
        self.thread_log=None
        self.thread_res=None
        self.timer=None
        self.scheduled_operation=None
        self.lock=Lock()
        self.__status_update_callback=None

    def is_running(self):
        with self.lock:
            running=self.__is_running_unlocked()
        return running

    def __is_running_unlocked(self):
        running=self.current_operation
        if not running:
            # Consistency check
            assert((not self.borg_instance and not self.thread_log and
                    not self.thread_res))
        return running

    def __block_when_running(self):
        running=self.is_running()
        assert(not running)

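    # Runs in its own thread: consumes progress and log messages from the borg
    # subprocess, records progress and errors in current_operation, and
    # notifies the status update callback.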
    def __log_listener(self):
        logger.debug('Log listener thread waiting for entries')
        for status in iter(self.borg_instance.read_log, None):
            logger.debug(str(status))
            t=status['type']

            errors_this_message=None
            callback=None

            if t=='progress_percent':
                current=safe_get_int(status, 'current')
                total=safe_get_int(status, 'total')
                if current is not None and total is not None:
                    with self.lock:
                        self.current_operation['progress_current']=current
                        self.current_operation['progress_total']=total
                        status, callback=self.__status_unlocked()

            elif t=='archive_progress':
                original_size=safe_get_int(status, 'original_size')
                compressed_size=safe_get_int(status, 'compressed_size')
                deduplicated_size=safe_get_int(status, 'deduplicated_size')
                if (original_size is not None and compressed_size is not None
                        and deduplicated_size is not None):
                    with self.lock:
                        self.current_operation['original_size']=original_size
                        self.current_operation['compressed_size']=compressed_size
                        self.current_operation['deduplicated_size']=deduplicated_size
                        status, callback=self.__status_unlocked()

            elif t=='progress_message':
                pass

            elif t=='file_status':
                pass

            elif t=='log_message':
                if 'levelname' not in status:
                    status['levelname']='ERROR'
                if 'message' not in status:
                    status['message']='UNKNOWN'
                if 'name' not in status:
                    status['name']='borg'
                lvl=translate_loglevel(status['levelname'])
                logger.log(lvl, status['name'] + ': ' + status['message'])
                if lvl>=logging.WARNING:
                    errors_this_message=status
                    with self.lock:
                        self.current_operation['errors']=True
                        status, callback=self.__status_unlocked()
            else:
                logger.debug('Unrecognised log entry %s' % str(status))

            if callback:
                callback(self, status, errors=errors_this_message)

        logger.debug('Waiting for borg subprocess to terminate in log thread')

        self.borg_instance.wait()

        logger.debug('Borg subprocess terminated; terminating log listener thread')

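    # Runs in its own thread: waits for the final result of the borg
    # subprocess, records success or failure of 'create' operations, cleans up
    # the instance state and schedules the next operation.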
    def __result_listener(self):
        with self.lock:
            status, callback=self.__status_unlocked()
        if callback:
            callback(self, status)

        logger.debug('Result listener thread waiting for result')

        res=self.borg_instance.read_result()

        success=True

        logger.debug('Borg result: %s' % str(res))

        if res is None:
            success=False

        logger.debug('Waiting for borg subprocess to terminate in result thread')

        success=success and self.borg_instance.wait()

        logger.debug('Borg subprocess terminated (success: %s); terminating result listener thread' % str(success))

        self.thread_log.join()

        with self.lock:
            if self.current_operation['operation']=='create':
                self.lastrun_when=self.current_operation['when_monotonic']
                self.lastrun_success=success
            self.thread_res=None
            self.thread_log=None
            self.borg_instance=None
            self.current_operation=None
            self.__schedule_unlocked()
            status, callback=self.__status_unlocked()
        if callback:
            callback(self, status)

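    # Start a BorgInstance for the given operation and spawn the log and
    # result listener threads.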
    def __do_launch(self, queue, op, archive_or_repository, *args):
        passphrase=self.extract_passphrase()

        inst=BorgInstance(op['operation'], archive_or_repository, *args)
        inst.launch(passphrase=passphrase)

        logger.debug('Creating listener threads')

        t_log=Thread(target=self.__log_listener)
        t_log.daemon=True

        t_res=Thread(target=self.__result_listener)
        t_res.daemon=True

        self.thread_log=t_log
        self.thread_res=t_res
        self.borg_instance=inst
        self.queue=queue
        self.current_operation=op
        self.current_operation['when_monotonic']=time.monotonic()

        t_log.start()
        t_res.start()

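    # Launch an operation unless one is already running, cancelling any
    # pending timed operation first. Must be called with the lock held.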
    def __launch(self, op, queue):
        if self.__is_running_unlocked():
            logger.info('Cannot start %s: already running %s'
                        % (op['operation'], self.current_operation))
            return False
        else:
            if self.timer:
                logger.debug('Unscheduling timed operation due to launch of operation')
                self.timer.cancel()
                self.timer=None
                self.scheduled_operation=None

            try:
                logger.debug("Launching '%s' on '%s'" % (op['operation'], self.name))

                if op['operation']=='create':
                    archive="%s::%s%s" % (self.repository,
                                          self.archive_prefix,
                                          self.archive_template)

                    self.__do_launch(queue, op, archive,
                                     self.common_parameters+self.create_parameters,
                                     self.paths)
                elif op['operation']=='prune':
                    self.__do_launch(queue, op, self.repository,
                                     ([{'prefix': self.archive_prefix}] + 
                                      self.common_parameters +
                                      self.prune_parameters))
                else:
                    raise NotImplementedError("Invalid operation '%s'" % op['operation'])
            except Exception as err:
                logger.debug('Rescheduling after failure')
                self.lastrun_when=time.monotonic()
                self.lastrun_success=False
                self.__schedule_unlocked()
                raise err

            return True

    def create(self, queue):
        op={'operation': 'create', 'detail': 'manual'}
        with self.lock:
            res=self.__launch(op, queue)
        return res

    def prune(self, queue):
        op={'operation': 'prune', 'detail': 'manual'}
        with self.lock:
            res=self.__launch(op, queue)
        return res

    # TODO: Decide exact (manual) abort mechanism. Perhaps two stages
    def abort(self):
        with self.lock:
            if self.borg_instance:
                self.borg_instance.terminate()
            thread_log=self.thread_log
            thread_res=self.thread_res

        # Python threads cannot be forcibly terminated; once the borg
        # subprocess has been terminated above, the listener threads finish on
        # their own, so wait for them instead.
        if thread_log:
            thread_log.join()

        if thread_res:
            thread_res.join()


    def join(self):
        logger.debug('Waiting for borg listener threads to terminate')

        with self.lock:
            thread_log=self.thread_log
            thread_res=self.thread_res

        if thread_log:
            thread_log.join()

        if thread_res:
            thread_res.join()

        assert(self.thread_log is None and self.thread_res is None)

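    # Timer callback: launch the scheduled operation unless one is already
    # running.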
    def __queue_timed_operation(self):
        with self.lock:
            op=self.scheduled_operation
            self.scheduled_operation=None
            self.timer=None

            if self.__is_running_unlocked():
                logger.info('Aborted queue operation, as an operation is already running')
            else:
                # TODO: Queue on 'repository' and online status for SSH, etc.

                # TODO: UI comms. queue?
                self.__launch(op, None)

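    # Decide what to run next and when: the first backup after startup, a
    # retry after a failed run, or a regular backup after a successful one.
    # An interval of 0 disables scheduling.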
    def __next_operation_unlocked(self):
        # TODO: pruning as well
        now=time.monotonic()
        if not self.lastrun_when:
            initial_interval=self.retry_interval
            if initial_interval==0:
                initial_interval=self.backup_interval
            if initial_interval==0:
                return None
            else:
                return {'operation': 'create',
                        'detail': 'initial',
                        'when_monotonic': now+initial_interval}
        elif not self.lastrun_success:
            if self.retry_interval==0:
                return None
            else:
                return {'operation': 'create',
                        'detail': 'retry',
                        'when_monotonic': self.lastrun_when+self.retry_interval}
        else:
            if self.backup_interval==0:
                return None
            else:
                return {'operation': 'create',
                        'detail': None,
                        'when_monotonic': self.lastrun_when+self.backup_interval}

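    # Arm a threading.Timer for the next operation, unless an operation is
    # already running. Must be called with the lock held.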
    def __schedule_unlocked(self):
        if self.current_operation:
            return self.current_operation
        else:
            op=self.__next_operation_unlocked()

            if op:
                now=time.monotonic()
                delay=max(0, op['when_monotonic']-now)
                logger.info("Scheduling '%s' (detail: %s) of '%s' in %d seconds" %
                            (op['operation'], op['detail'], self.name, delay))
                tmr=Timer(delay, self.__queue_timed_operation)
                self.scheduled_operation=op
                self.timer=tmr
                tmr.start()

            return op

    def schedule(self):
        with self.lock:
            return self.__schedule_unlocked()

    def status(self):
        with self.lock:
            res=self.__status_unlocked()
        return res[0]

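    # Compose a status dictionary describing the current, scheduled, or idle
    # state, and return it together with the status update callback.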
    def __status_unlocked(self):
        callback=self.__status_update_callback
        if self.current_operation:
            status=self.current_operation
            status['type']='current'
        elif self.scheduled_operation:
            status=self.scheduled_operation
            status['type']='scheduled'
        else:
            status={'type': 'nothing'}

        status['name']=self.name

        if 'errors' not in status:
            status['errors']=False

        if 'when_monotonic' in status:
            status['when']=(status['when_monotonic']
                            -time.monotonic()+time.time())

        return status, callback

    def set_status_update_callback(self, callback):
        with self.lock:
            self.__status_update_callback=callback
