1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
|
from abc import ABC, abstractmethod
import contextlib
import errno
import logging
import re
import socket
import subprocess
from .exceptions import BackupException, RemoteExecException
from . import repository
from . import ssh_remote
from . import _ssh_client
def _parse_name(name):
    """
    Parse a backup name into a remote specification.

    Accepted formats: ``user@host``, ``user@host:port`` and
    ``user@[ipv6-literal]:port`` (brackets required for IPv6 with a port).

    :param str name: backup name, must contain ``@``
    :returns: ssh_remote.SSHRemote for the parsed host/port/user
    :raises ValueError: if *name* contains no ``@`` or the port is not numeric
    """
    # split off the username
    if '@' not in name:
        # bug fix: the original message had an unfilled '%s' placeholder
        raise ValueError('Invalid backup name: "%s", must be of format user@host' % name)
    username, _, host = name.partition('@')
    port = 22 # overridden later if specified in name
    colons = host.count(':')
    if colons >= 2: # IPv6 literal, possibly with port
        m = re.match(r'\[(.+)\](?::(\d+))?$', host, re.ASCII | re.IGNORECASE)
        if m is not None: # [literal] or [literal]:port
            host = m.group(1)
            if m.group(2) is not None:
                # bug fix: previously the raw group (':port' incl. colon, or
                # None when absent) was stored as the port
                port = int(m.group(2))
    elif colons == 1: # host:port
        host, _, port = host.partition(':')
        # bug fix: keep port an int so comparisons like 'port != 22' work
        port = int(port)
    return ssh_remote.SSHRemote(host, port, username)
class Target(ABC):
    """
    Base class for backup targets: a named set of directories to back up,
    minus a set of exclude paths.  Subclasses implement :meth:`save`.
    """
    # class-level defaults; all are set in __init__
    name = None
    dirs = None
    excludes = None
    _logger = None
    def __init__(self, name, dirs, excludes = None, logger = None):
        """
        :param str name: backup name (also used as the bup save name)
        :param list dirs: directories to back up
        :param list excludes: paths to exclude (default: none)
        :param logger: logger to use; defaults to one named after *name*
        """
        self.name = name
        self.dirs = dirs
        self.excludes = [] if excludes is None else excludes
        self._logger = logging.getLogger(self.name) if logger is None else logger
    def __str__(self):
        return "Target{%s/%s}" % (self.dirs, self.excludes)
    def _log_command(self, name, retcode, stdout, stderr):
        """Debug-log a finished command's return code and (truncated) output."""
        self._logger.debug('%s finished with return code %d' % (name, retcode))
        def _sanitize(raw):
            limit = 128
            # truncate first, then decode so the log line stays printable
            text = raw[:limit].decode('utf-8', errors = 'backslashreplace')
            # keep the whole output on a single log line
            text = r'\n'.join(text.splitlines())
            # mark that output was cut off
            if len(raw) > limit:
                text += '[...]'
            return text
        if stdout:
            self._logger.debug('%s stdout: %s' % (name, _sanitize(stdout)))
        if stderr:
            self._logger.debug('%s stderr: %s' % (name, _sanitize(stderr)))
    def _do_save(self, bup_exec, path_prefix = '', index_opts = None, save_opts = None):
        """
        Run ``bup index`` followed by ``bup save`` over this target's dirs.

        :param list bup_exec: command prefix that invokes bup (e.g. ['bup'])
        :param str path_prefix: prepended to each dir and exclude path
        :param list index_opts: extra options for ``bup index``
        :param list save_opts: extra options for ``bup save``
        :returns: repository.StepResult(success, combined output of failures)
        """
        index_opts = [] if index_opts is None else index_opts
        save_opts = [] if save_opts is None else save_opts
        save_excludes = [path_prefix + '/' + e for e in self.excludes]
        save_dirs = [path_prefix + '/' + d for d in self.dirs]
        # index step
        cmd = (bup_exec + ['index', '--update', '--one-file-system'] + index_opts
                + ['--exclude=%s' % e for e in save_excludes] + save_dirs)
        self._logger.debug('Executing index command: ' + str(cmd))
        res_idx = subprocess.run(cmd, capture_output = True)
        self._log_command('Index', res_idx.returncode,
                res_idx.stdout, res_idx.stderr)
        # save step -- runs regardless of the index result; failures of
        # either step are aggregated below
        cmd = bup_exec + ['save', '-n', self.name] + save_opts + save_dirs
        self._logger.debug('Executing save command: ' + str(cmd))
        res_save = subprocess.run(cmd, capture_output = True)
        self._log_command('Save', res_save.returncode,
                res_save.stdout, res_save.stderr)
        # the last nonzero return code wins; failing steps' output is
        # concatenated (stderr before stdout, per step)
        retcode = 0
        output = b''
        for res in (res_idx, res_save):
            if res.returncode != 0:
                retcode = res.returncode
                output += res.stderr + res.stdout
        return repository.StepResult(retcode == 0, output)
    @abstractmethod
    def save(self, data_dir):
        """Perform the backup; return a repository.StepResult."""
        pass
class TargetLocal(Target):
    """Back up this machine's directories using the local ``bup`` binary."""
    def save(self, data_dir):
        """Run the index/save steps locally; *data_dir* is not used here."""
        return self._do_save(['bup'])
class TargetSSH(Target):
    """
    Back up a remote machine by running ``bup on user@host`` against the
    remote's own bup repository (BUP_DIR).
    """
    _remote = None # ssh_remote.SSHRemote describing the machine to back up
    def __init__(self, name, dirs, excludes = None, logger = None,
            remote = None, remote_bupdir = None):
        """
        :param remote: SSHRemote to back up; parsed from *name* if omitted
        :param str remote_bupdir: BUP_DIR on the remote (default $HOME/.bup)
        :raises NotImplementedError: for proxy remotes or non-default ports
        """
        if remote is None:
            remote = _parse_name(name)
        if remote.proxy_remote is not None:
            raise NotImplementedError('Proxy remote not implemented')
        if remote.port != 22:
            raise NotImplementedError('Specifying port not implemented')
        self._remote = remote
        if remote_bupdir is None:
            remote_bupdir = '$HOME/.bup'
        self._remote_bupdir = remote_bupdir
        super().__init__(name, dirs, excludes, logger)
    def __str__(self):
        return "%s{SSH:%s}" % (super().__str__(), str(self._remote))
    def _paramiko_exec_cmd(self, client, cmd):
        """
        Execute *cmd* over the paramiko *client* and return decoded stdout.

        :raises RemoteExecException: on read timeout or nonzero exit status
        """
        self._logger.debug('Client %s: executing command: %s' % (client, cmd))
        res = client.exec_command(cmd)
        chan = res[0].channel
        chan.settimeout(64) # seconds; guards against a hung remote command
        try:
            # NOTE(review): stdout is drained before stderr; a command that
            # fills the stderr buffer first could stall until the timeout --
            # assumed fine for the short commands used here, confirm
            out, err = res[1].read(), res[2].read()
        except socket.timeout as t:
            raise RemoteExecException('Timeout waiting for command output',
                    errno.ETIMEDOUT, b'') from t
        chan.recv_exit_status()
        if chan.exit_status != 0:
            raise RemoteExecException('Error executing "%s"' % cmd,
                    chan.exit_status, err + out)
        self._log_command('Remote command', chan.exit_status, out, err)
        return out.decode('utf-8', errors = 'backslashreplace')
    def _resolve_remote_bupdir(self, ssh):
        """Resolve self._remote_bupdir to an absolute path on the remote."""
        bupdir = self._paramiko_exec_cmd(ssh, 'realpath -e ' + self._remote_bupdir).splitlines()
        # require exactly one absolute, whitespace-free path
        if (len(bupdir) != 1 or len(bupdir[0]) <= 1 or bupdir[0][0] != '/' or
                re.search(r'\s', bupdir[0])):
            raise BackupException('Invalid BUP_DIR on the remote target: %s' % str(bupdir))
        return bupdir[0]
    def save(self, data_dir):
        with _ssh_client.SSHConnection(self._remote) as ssh:
            remote_bupdir = self._resolve_remote_bupdir(ssh)
            bup_exec = ['bup', 'on', '%s@%s' % (self._remote.username, self._remote.host),
                    '-d', remote_bupdir]
            # bug fix: bup_exec (including the resolved '-d <BUP_DIR>') was
            # previously discarded and a fresh list without '-d' was passed,
            # so the resolved remote BUP_DIR was never actually used
            return self._do_save(bup_exec)
class TargetSSHLXCLVM(TargetSSH):
    """
    This target backs up an LXC container that lives on its own LVM logical
    volume.  Requires root-capable login on the container's host.

    Procedure: create a read-only LVM snapshot of the container's root
    volume on the host, mount it into the container's mount namespace via
    the external 'nsmount' helper, run bup inside the container against the
    mounted snapshot, then unmount and remove the snapshot again.

    :param SSHRemote parent_remote: root-capable remote of the LXC host
    """
    _parent_remote = None       # SSHRemote of the LXC host
    _lxc_username = None        # user owning the container on the host
    _lxc_containername = None   # LXC container name
    _snapshot_size = None       # LVM snapshot COW size, e.g. '20G'
    def __init__(self, name, dirs, excludes = None, logger = None,
            target_remote = None, target_remote_bupdir = None,
            parent_remote = None,
            lxc_username = None, lxc_containername = None,
            snapshot_size = '20G'):
        """
        :param parent_remote: SSHRemote for the LXC host (required)
        :param str lxc_username: container owner on the host; defaults to
            parent_remote.username
        :param str lxc_containername: container name (required)
        :param str snapshot_size: size passed to ``lvcreate -L``
        :raises ValueError: if parent_remote or lxc_containername is missing
        """
        if parent_remote is None:
            raise ValueError('parent_remote not specified')
        # robustness: fail early instead of formatting 'None' into the
        # lxc-info commands later
        if lxc_containername is None:
            raise ValueError('lxc_containername not specified')
        if lxc_username is None:
            # bug fix: this read 'parent_remote.usename' (misspelled
            # attribute), raising AttributeError whenever the default applied
            lxc_username = parent_remote.username
        self._parent_remote = parent_remote
        self._lxc_username = lxc_username
        self._lxc_containername = lxc_containername
        self._snapshot_size = snapshot_size
        super().__init__(name, dirs, excludes, logger, target_remote, target_remote_bupdir)
    def __str__(self):
        return "%s{LXC:%s/%s@[%s]}{LVM:%s}" % (super().__str__(), self._lxc_containername,
                self._lxc_username, str(self._parent_remote), self._snapshot_size)
    def save(self, data_dir):
        with contextlib.ExitStack() as stack:
            parent = stack.enter_context(_ssh_client.SSHConnection(self._parent_remote))
            container = stack.enter_context(_ssh_client.SSHConnection(self._remote))
            # resolve the path to BUP_DIR on the container
            container_bupdir = self._resolve_remote_bupdir(container)
            # make sure the mount directory exists
            # due to how bup index works, the mount directory has to stay the
            # same for each backup
            # we use BUP_DIR/lbup_mount
            container_mountpoint = '%s/%s' % (container_bupdir, 'lbup_mount')
            self._paramiko_exec_cmd(container, 'mkdir -p -m 700 ' + container_mountpoint)
            save_opts = ['--strip-path', container_mountpoint]
            # get the PID of the container's init
            cmd_template = 'su -s /bin/sh -c "{command}" %s' % self._lxc_username
            container_pid = self._paramiko_exec_cmd(parent, cmd_template.format(
                    command = 'lxc-info -H -p -n %s' % self._lxc_containername)).rstrip('\n')
            # make sure it's a number
            if not re.fullmatch('[0-9]+', container_pid):
                raise BackupException('Invalid container PID: %s' % container_pid)
            # get the LV/VG for the container's rootfs
            container_rootfs = self._paramiko_exec_cmd(parent, cmd_template.format(
                    command = 'lxc-info -H -c lxc.rootfs.path -n %s' %
                    self._lxc_containername)).rstrip('\n')
            # oct-escape certain characters as they are in /proc/mounts
            # see seq_path[_root]() in linux
            # bug fix: the kernel escapes a backslash as three octal digits,
            # i.e. \134, not \0134
            container_rootfs = container_rootfs.translate(
                    { ord(' ') : r'\040', ord('\t') : r'\011',
                      ord('\n') : r'\012', ord('\\') : r'\134'})
            # make sure the rootfs path is
            # - non-empty
            # - an absolute path
            # - contains no whitespace
            if (len(container_rootfs) <= 1 or container_rootfs[0] != '/' or
                    re.search(r'\s', container_rootfs)):
                raise BackupException('Unexpected container rootfs directory: %s' % container_rootfs)
            # find the device node and the filesystem type for the container rootfs
            # NOTE(review): the path is used as an unescaped grep pattern;
            # regex metacharacters in it could match extra lines, which the
            # single-line and exact-field checks below would then reject
            mountlines = self._paramiko_exec_cmd(parent,
                    'grep "%s" /proc/mounts' % container_rootfs).splitlines()
            if len(mountlines) != 1:
                raise BackupException('Expected exactly one matching mount line for the '
                        'container root, got %d' % len(mountlines))
            mountline = mountlines[0].split()
            # bug fix: at least 3 fields are required since mountline[2]
            # (the fstype) is read below; the old '< 2' check allowed an
            # IndexError on a malformed line
            if len(mountline) < 3 or mountline[1] != container_rootfs:
                raise BackupException('Invalid mount line: %s' % mountline)
            lv_path = mountline[0]
            lv_fstype = mountline[2]
            # make sure the LV path is
            # - non-empty
            # - an absolute path
            # - contains no whitespace
            # and that the FS type is non-empty
            if (len(lv_path) <= 1 or lv_path[0] != '/' or
                    re.search(r'\s', lv_path) or len(lv_fstype) < 1):
                raise BackupException('Unexpected LV path/FS type: %s\t%s' % (lv_path, lv_fstype))
            # find the LV and VG names
            lvdisplay = self._paramiko_exec_cmd(parent,
                    'lvdisplay -C --noheadings -o lv_name,vg_name ' + lv_path).split()
            if len(lvdisplay) != 2:
                raise BackupException('Unexpected lvdisplay output: %s' % str(lvdisplay))
            lv_name, vg_name = lvdisplay
            if len(lv_name) < 1 or len(vg_name) < 1:
                raise BackupException('Unexpected LV/VG name: %s\t%s' % (lv_name, vg_name))
            # create a read-only snapshot
            snapshot_name = 'lbup_' + lv_name
            self._paramiko_exec_cmd(parent,
                    'lvcreate --permission r --snapshot -L {size} -n {name} {origin}'
                    .format(size = self._snapshot_size, name = snapshot_name,
                            origin = lv_path))
            # registered after the connections were entered, so the removal
            # runs before the parent connection closes (ExitStack is LIFO)
            stack.callback(lambda: self._paramiko_exec_cmd(parent,
                    'lvremove -f %s/%s' % (vg_name, snapshot_name)))
            # execute the backup
            # wait for the new node to be created
            self._paramiko_exec_cmd(parent, 'udevadm settle')
            # we cannot trust any binaries located inside the container, since a
            # compromised container could use them to execute arbitrary code
            # with real root privileges, thus nullifying the point of
            # unprivileged containers)
            # so we ship a special tool, 'nsmount', which has to be
            # installed on the parent, to mount the snapshot into the
            # container mount namespace
            self._paramiko_exec_cmd(parent,
                    'nsmount m {pid} {mountpoint} {devpath} {fstype}'.format(
                            pid = container_pid, mountpoint = container_mountpoint,
                            devpath = '/dev/%s/%s' % (vg_name, snapshot_name),
                            fstype = lv_fstype))
            bup_exec = ['bup', 'on', '%s@%s' % (self._remote.username, self._remote.host),
                    '-d', container_bupdir]
            try:
                ret = self._do_save(bup_exec, path_prefix = container_mountpoint,
                        save_opts = save_opts, index_opts = ['--no-check-device'])
            finally:
                # always unmount the snapshot, even if the save step raised
                self._paramiko_exec_cmd(parent,
                        'nsmount u {pid} {mountpoint}'.format(
                                pid = container_pid, mountpoint = container_mountpoint))
            return ret
|