from abc import ABC, abstractmethod
import contextlib
import errno
import logging
import re
import secrets
import socket
import subprocess

from .exceptions import BackupException, RemoteExecException
from ._mountinfo import MountInfo
from ._path import AbsPath, ROOT
from .ssh_remote import SSHRemote
from . import repository
from . import _ssh_client


def _parse_name(name):
    """
    Parse a backup name into a remote specification.
    """
    # split off the username
    if '@' not in name:
        raise ValueError('Invalid backup name: "%s", must be of format user@host' % name)
    username, _, host = name.partition('@')

    port = 22  # overridden later if specified in name
    colons = host.count(':')
    if colons >= 2:
        # IPv6 literal, possibly with port
        m = re.match(r'\[(.+)\](:\d+)?', host, re.ASCII | re.IGNORECASE)
        if m is not None:
            # [literal]:port
            host = m.group(1)
            if m.group(2) is not None:
                port = int(m.group(2)[1:])
    elif colons == 1:
        # host:port
        host, _, port = host.partition(':')
        port = int(port)

    return SSHRemote(host, port, username)


class Target(ABC):
    name = None
    dirs = None
    excludes = None
    _logger = None

    def __init__(self, name, dirs, excludes = None, logger = None):
        if excludes is None:
            excludes = []
        if len(dirs) < 1:
            raise ValueError('One or more dirs to backup required')

        self.name = name
        self.dirs = list(map(AbsPath, dirs))
        self.excludes = list(map(AbsPath, excludes))
        if logger is None:
            self._logger = logging.getLogger('%s.%s' % (self.__class__.__name__, self.name))
        else:
            self._logger = logger

    def __repr__(self):
        return "%s{%s/%s}" % (self.__class__.__name__, self.dirs, self.excludes)

    def _log_command(self, name, retcode, stdout, stderr):
        self._logger.debug('%s finished with return code %d' % (name, retcode))

        def sanitize(b):
            LOG_LEN = 128
            # truncate and decode
            s = b[:LOG_LEN].decode('utf-8', errors = 'backslashreplace')
            # replace newlines with literal \n's
            s = r'\n'.join(s.splitlines())
            # add ellipsis if truncated
            if len(b) > LOG_LEN:
                s += '[...]'
            return s

        if len(stdout) > 0:
            self._logger.debug('%s stdout: %s' % (name, sanitize(stdout)))
        if len(stderr) > 0:
            self._logger.debug('%s stderr: %s' % (name, sanitize(stderr)))

    def _do_save(self, bup_exec, dry_run, *,
                 reparent = None, index_opts = None, save_opts = None):
        dirs = self.dirs
        excludes = self.excludes
        if reparent is not None:
            dirs = [d.reparent(*reparent) for d in dirs]
            excludes = [d.reparent(*reparent) for d in excludes]
        if index_opts is None:
            index_opts = []
        if save_opts is None:
            save_opts = []

        # index
        cmd = bup_exec + ['index', '--update', '--one-file-system'] + index_opts
        cmd.extend(['--exclude=%s' % str(e) for e in excludes])
        cmd.extend(map(str, dirs))
        if dry_run:
            self._logger.debug('Not executing index command: ' + str(cmd))
        else:
            self._logger.debug('Executing index command: ' + str(cmd))
            res_idx = subprocess.run(cmd, capture_output = True)
            self._log_command('Index', res_idx.returncode, res_idx.stdout, res_idx.stderr)

        # save
        cmd = bup_exec + ['save', '-n', self.name] + save_opts + list(map(str, dirs))
        if dry_run:
            self._logger.debug('Not executing save command: ' + str(cmd))
        else:
            self._logger.debug('Executing save command: ' + str(cmd))
            res_save = subprocess.run(cmd, capture_output = True)
            self._log_command('Save', res_save.returncode, res_save.stdout, res_save.stderr)

        retcode = 0
        output = b''
        if not dry_run:
            if res_idx.returncode != 0:
                retcode = res_idx.returncode
                output += res_idx.stderr + res_idx.stdout
            if res_save.returncode != 0:
                retcode = res_save.returncode
                output += res_save.stderr + res_save.stdout

        result = repository.StepResult(retcode == 0, output)
        return result

    @abstractmethod
    def save(self, dry_run = False):
        pass
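
# A minimal usage sketch (illustrative only; the dirs and excludes below are
# placeholders, and the concrete subclasses are defined further down):
#
#     target = TargetLocal('mybox', ['/etc', '/home'], excludes = ['/home/cache'])
#     result = target.save(dry_run = True)
#
# save() returns a repository.StepResult wrapping whether both the 'bup index'
# and 'bup save' steps succeeded plus their captured output; with
# dry_run = True the bup commands are only logged, not executed.
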
class TargetLocal(Target):
    def save(self, dry_run = False):
        return self._do_save(['bup'], dry_run)


class TargetSSH(Target):
    _remote = None
    _remote_bupdir = None

    def __init__(self, name, dirs, excludes = None, logger = None,
                 remote = None, remote_bupdir = None):
        if remote is None:
            remote = _parse_name(name)
        if remote.proxy_remote is not None:
            raise NotImplementedError('Proxy remote not implemented')
        if remote.port != 22:
            raise NotImplementedError('Specifying port not implemented')
        self._remote = remote

        if remote_bupdir is None:
            remote_bupdir = '$HOME/.bup'
        self._remote_bupdir = remote_bupdir

        super().__init__(name, dirs, excludes, logger)

    def __str__(self):
        return "%s{SSH:%s}" % (super().__str__(), str(self._remote))

    def _paramiko_exec_cmd(self, client, cmd, decode = True):
        self._logger.debug('Client %s: executing command: %s' % (client, cmd))
        res = client.exec_command(cmd)
        chan = res[0].channel
        chan.settimeout(64)
        try:
            out, err = res[1].read(), res[2].read()
        except socket.timeout as t:
            raise RemoteExecException('Timeout waiting for command output',
                    errno.ETIMEDOUT, b'') from t
        chan.recv_exit_status()
        if chan.exit_status != 0:
            raise RemoteExecException('Error executing "%s"' % cmd,
                    chan.exit_status, err + out)
        self._log_command('Remote command', chan.exit_status, out, err)
        if decode:
            out = out.decode('utf-8', errors = 'backslashreplace')
        return out

    def _resolve_remote_bupdir(self, ssh):
        bupdir = self._paramiko_exec_cmd(ssh, 'realpath -e ' + self._remote_bupdir).splitlines()
        if (len(bupdir) != 1 or len(bupdir[0]) <= 1
                or bupdir[0][0] != '/' or re.search(r'\s', bupdir[0])):
            raise BackupException('Invalid BUP_DIR on the remote target: %s' % str(bupdir))
        return bupdir[0]

    def save(self, dry_run = False):
        with _ssh_client.SSHConnection(self._remote) as ssh:
            remote_bupdir = self._resolve_remote_bupdir(ssh)
            bup_exec = ['bup', 'on', '%s@%s' % (self._remote.username, self._remote.host),
                        '-d', remote_bupdir]
            return self._do_save(bup_exec, dry_run)


class TargetSSHLVM(TargetSSH):
    """
    This target backs up a remote host using LVM snapshots.
    All the dirs backed up must be on the same LV.
    Requires root login on the system.
    """
    _snapshot_size = None

    def __init__(self, name, dirs, excludes = None, logger = None,
                 remote = None, remote_bupdir = None, snapshot_size = '20G'):
        self._snapshot_size = snapshot_size
        super().__init__(name, dirs, excludes, logger, remote, remote_bupdir)

    def __str__(self):
        return "%s{LVM:%s}" % (super().__str__(), self._snapshot_size)

    def _resolve_mntdev(self, mntinfo):
        """
        Find out which LV to snapshot.

        This also checks that all the dirs are on the same LV and no
        non-trivial topologies (such as symlinks or bind mounts) are involved,
        otherwise a BackupException is raised.

        Return a tuple of (devnum, mountpoint, fstype).
        """
        devnum = None
        mountpoint = None
        fstype = None
        for d in self.dirs:
            mp = mntinfo.mountpoint_for_path(d)
            e = list(mntinfo.entries_for_mountpoint(mp))
            if len(e) != 1:
                raise BackupException('Expected exactly one mountpoint for dir', d, str(e))
            if e[0].root != ROOT:
                raise BackupException('Mountpoint is a bind mount, which is not supported',
                        str(e[0]))

            dn = e[0].devnum
            if devnum is None:
                devnum = dn
                mountpoint = mp
                fstype = e[0].fstype
                continue
            if dn != devnum or mp != mountpoint:
                raise BackupException('Mismatching device numbers/mountpoints',
                        dn, devnum, mp, mountpoint)

        # TODO? check that there are no symlinks?
        # by running stat maybe?
        return (devnum, mountpoint, fstype)

    def _resolve_lv(self, ssh, devnum):
        """
        Find the logical volume for the given device number.
        Return its full name, i.e.
        vgname/lvname
        """
        major = devnum >> 8
        minor = devnum & 255
        res = self._paramiko_exec_cmd(ssh,
                'lvs --select "kernel_major={major}&&kernel_minor={minor}" '
                '--noheadings -o lv_full_name'.format(major = major, minor = minor))
        lv_name = res.strip()
        # valid LV paths are vgname/lvname, each non-empty alphanumeric+_
        if not re.fullmatch(r'\w+/\w+', lv_name, re.ASCII):
            raise BackupException('Invalid LV path', lv_name)
        return lv_name

    @contextlib.contextmanager
    def _snapshot_lv(self, ssh, devnum):
        """
        Return a context manager that creates a read-only LVM snapshot for the
        specified LV device number and destroys it at exit.
        """
        lv_fullname = self._resolve_lv(ssh, devnum)
        self._logger.debug('LV volume to snapshot is %s', lv_fullname)
        vg_name = lv_fullname.split('/')[0]

        # create a read-only snapshot with a random name
        # make sure the snapshot name starts with a letter
        snapshot_name = 'a' + secrets.token_urlsafe()
        snapshot_fullname = '%s/%s' % (vg_name, snapshot_name)
        self._paramiko_exec_cmd(ssh,
                'lvcreate --permission r --snapshot -L {size} -n {name} {origin}'
                .format(size = self._snapshot_size, name = snapshot_name,
                        origin = lv_fullname))
        try:
            # get the path to the snapshot device node
            res = self._paramiko_exec_cmd(ssh,
                    'lvs --select "lv_full_name=%s" --noheadings -o lv_path'
                    % snapshot_fullname)
            lv_path = res.strip()
            if not lv_path.startswith('/'):
                raise BackupException('Got invalid snapshot LV path', lv_path)
            self._logger.debug('Created snapshot %s at %s', snapshot_fullname, lv_path)
            yield lv_path
        finally:
            self._paramiko_exec_cmd(ssh, 'lvremove -f %s' % snapshot_fullname)
            self._logger.debug('Removed snapshot %s', snapshot_fullname)

    @contextlib.contextmanager
    def _mount_snapshot(self, ssh, devnum, mount_path):
        """
        Return a context manager that creates a read-only LVM snapshot for the
        specified LV device number and mounts it at mount_path, then unmounts
        and destroys it at exit.
""" with self._snapshot_lv(ssh, devnum) as lv_path: try: self._paramiko_exec_cmd(ssh, 'mount -oro %s %s' % (lv_path, mount_path)) yield None finally: self._paramiko_exec_cmd(ssh, 'umount %s' % mount_path) def save(self, dry_run = False): with contextlib.ExitStack() as stack: conn_tgt = stack.enter_context(_ssh_client.SSHConnection(self._remote)) remote_root = SSHRemote(self._remote.host, self._remote.port, 'root', self._remote.proxy_remote) conn_root = stack.enter_context(_ssh_client.SSHConnection(remote_root)) # resolve the path to BUP_DIR on the remote bupdir = self._resolve_remote_bupdir(conn_tgt) # make sure the mount directory exists # due to how bup index works, the mount directory has to stay the # same for each backup # we use BUP_DIR/lbup_mount snapshot_mount = '%s/%s' % (bupdir, 'lbup_mount') self._paramiko_exec_cmd(conn_tgt, 'mkdir -p -m 700 ' + snapshot_mount) # read and parse the mountinfo table mntinfo = MountInfo( self._paramiko_exec_cmd(conn_root, 'cat /proc/1/mountinfo', decode = False)) devnum, mountpoint, _ = self._resolve_mntdev(mntinfo) self._logger.debug('Backup targets are at device %s, mounted at %s', "%d:%d" % (devnum >> 8, devnum & 255), mountpoint) stack.enter_context(self._mount_snapshot(conn_root, devnum, snapshot_mount)) bup_exec = ['bup', 'on', '%s@%s' % (self._remote.username, self._remote.host), '-d', bupdir] reparent = (mountpoint, AbsPath(snapshot_mount)) return self._do_save(bup_exec, dry_run, reparent = reparent, save_opts = ['--graft=%s=%s' % (snapshot_mount, mountpoint)], index_opts = ['--no-check-device']) class TargetSSHLXCLVM(TargetSSHLVM): """ This target backs up an LXC container that lives on its own LVM logical volume. Requires root-capable login on the container's host. :param SSHRemote parent_remote: """ # The container is treated as untrusted, so all code here needs to be # careful to avoid giving it access to anything it would not have otherwise. # Specifically, any information obtained by running binaries in the # container should be assumed to have been potentially maliciously # manipulated. No binaries from the container should be run as the (host) root. 
    _parent_remote = None
    _lxc_username = None
    _lxc_containername = None

    def __init__(self, name, dirs, excludes = None, logger = None,
                 target_remote = None, target_remote_bupdir = None,
                 parent_remote = None, lxc_username = None, lxc_containername = None,
                 snapshot_size = '20G'):
        if parent_remote is None:
            raise ValueError('parent_remote not specified')
        if lxc_username is None:
            lxc_username = parent_remote.username
        self._parent_remote = parent_remote
        self._lxc_username = lxc_username
        self._lxc_containername = lxc_containername

        super().__init__(name, dirs, excludes, logger,
                target_remote, target_remote_bupdir, snapshot_size)

    def __str__(self):
        return "%s{LXC:%s/%s@[%s]}{LVM:%s}" % (super().__str__(),
                self._lxc_containername, self._lxc_username,
                str(self._parent_remote), self._snapshot_size)

    def save(self, dry_run = False):
        with contextlib.ExitStack() as stack:
            parent = stack.enter_context(_ssh_client.SSHConnection(self._parent_remote))
            container = stack.enter_context(_ssh_client.SSHConnection(self._remote))

            # resolve the path to BUP_DIR on the container
            container_bupdir = self._resolve_remote_bupdir(container)

            # make sure the mount directory exists
            # due to how bup index works, the mount directory has to stay the
            # same for each backup
            # we use BUP_DIR/lbup_mount
            container_mountpoint = '%s/%s' % (container_bupdir, 'lbup_mount')
            self._paramiko_exec_cmd(container, 'mkdir -p -m 700 ' + container_mountpoint)

            # get the PID of the container's init
            cmd_template = 'su -s /bin/sh -c "{command}" %s' % self._lxc_username
            container_pid = self._paramiko_exec_cmd(parent, cmd_template.format(
                    command = 'lxc-info -H -p -n %s' % self._lxc_containername)).rstrip('\n')
            # make sure it's a number
            if not re.fullmatch('[0-9]+', container_pid):
                raise BackupException('Invalid container PID: %s' % container_pid)

            # read and parse the mountinfo table for the container init,
            # which runs in the container's mount namespace
            mntinfo = MountInfo(
                    self._paramiko_exec_cmd(parent,
                        'cat /proc/%s/mountinfo' % container_pid, decode = False))
            lv_devnum, lv_mountpoint, lv_fstype = self._resolve_mntdev(mntinfo)

            # make sure we have a valid fstype
            lv_fstype = lv_fstype.decode('ascii')
            if not re.fullmatch(r'\w+', lv_fstype, re.ASCII):
                raise BackupException('Invalid LV FS type', lv_fstype)

            self._logger.debug('Backup targets are at device %s(%s), mounted at %s',
                    "%d:%d" % (lv_devnum >> 8, lv_devnum & 255), lv_fstype, lv_mountpoint)

            snapshot_path = stack.enter_context(self._snapshot_lv(parent, lv_devnum))

            # execute the backup
            # we cannot trust any binaries located inside the container, since a
            # compromised container could use them to execute arbitrary code
            # with real root privileges, thus nullifying the point of
            # unprivileged containers
            # so we ship a special tool, 'nsmount', which has to be
            # installed on the parent, to mount the snapshot into the
            # container mount namespace
            self._paramiko_exec_cmd(parent,
                    'nsmount m {pid} {mountpoint} {devpath} {fstype}'.format(
                        pid = container_pid,
                        mountpoint = container_mountpoint,
                        devpath = snapshot_path,
                        fstype = lv_fstype))

            bup_exec = ['bup', 'on', '%s@%s' % (self._remote.username, self._remote.host),
                        '-d', container_bupdir]
            save_opts = ['--graft=%s=%s' % (container_mountpoint, lv_mountpoint)]
            reparent = (lv_mountpoint, AbsPath(container_mountpoint))
            try:
                ret = self._do_save(bup_exec, dry_run,
                        reparent = reparent,
                        save_opts = save_opts,
                        index_opts = ['--no-check-device'])
            finally:
                self._paramiko_exec_cmd(parent,
                        'nsmount u {pid} {mountpoint}'.format(
                            pid = container_pid,
                            mountpoint = container_mountpoint))

            return ret
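
# Example wiring for the LXC-on-LVM target (a hedged sketch: host names,
# container name, directories and snapshot size are placeholders, and the
# SSHRemote construction mirrors the positional (host, port, username) form
# used by _parse_name above):
#
#     parent = SSHRemote('lxc-host.example', 22, 'backup')
#     target = TargetSSHLXCLVM('backup@webapp.example', ['/srv', '/etc'],
#                              parent_remote = parent,
#                              lxc_containername = 'webapp',
#                              snapshot_size = '10G')
#     result = target.save(dry_run = True)
#
# Note that dry_run only suppresses the 'bup index'/'bup save' invocations;
# the SSH connections, LVM snapshot and nsmount plumbing above still run.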