Merge lp:~cjwatson/ubuntu-system-image/cdimage-custom into lp:~registry/ubuntu-system-image/client
- cdimage-custom
- Merge into client
Status: | Superseded |
---|---|
Proposed branch: | lp:~cjwatson/ubuntu-system-image/cdimage-custom |
Merge into: | lp:~registry/ubuntu-system-image/client |
Diff against target: |
7377 lines (+7236/-0) (has conflicts) 25 files modified
.bzrignore (+9/-0) README (+12/-0) bin/copy-image (+308/-0) bin/generate-keyrings (+87/-0) bin/generate-keys (+61/-0) bin/import-images (+305/-0) bin/set-phased-percentage (+90/-0) bin/si-shell (+79/-0) etc/config.example (+48/-0) lib/systemimage/config.py (+206/-0) lib/systemimage/diff.py (+242/-0) lib/systemimage/generators.py (+1173/-0) lib/systemimage/gpg.py (+239/-0) lib/systemimage/tools.py (+367/-0) lib/systemimage/tree.py (+999/-0) tests/generate-keys (+52/-0) tests/run (+60/-0) tests/test_config.py (+281/-0) tests/test_diff.py (+265/-0) tests/test_generators.py (+1039/-0) tests/test_gpg.py (+163/-0) tests/test_static.py (+78/-0) tests/test_tools.py (+297/-0) tests/test_tree.py (+679/-0) utils/check-latest (+97/-0) Conflict adding file .bzrignore. Moved existing file to .bzrignore.moved. |
To merge this branch: | bzr merge lp:~cjwatson/ubuntu-system-image/cdimage-custom |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Registry Administrators | Pending | ||
Review via email: mp+237941@code.launchpad.net |
This proposal has been superseded by a proposal from 2014-10-10.
Commit message
Add a new cdimage-custom generator.
Description of the change
Add a new cdimage-custom generator.
This is basically just a clone-and-hack of cdimage-ubuntu, simplified somewhat. It goes with recent changes to ubuntu-cdimage, and is all with the aim of being able to fix bug 1367332 (moving some click packages to /custom) in a single step for the community Ubuntu images.
Unmerged revisions
- 246. By Colin Watson
-
Add a new cdimage-custom generator.
- 245. By Stéphane Graber
-
Drop system/ android/ cache/recovery from core image.
- 244. By Stéphane Graber
-
Hash http filepaths by default using a combination of the URL and version string, update code to pass current pep-8 test.
- 243. By Stéphane Graber
-
Fix variable name conflicts.
- 242. By Stéphane Graber
-
Use a comma as the separator to avoid ini parsing errors.
- 241. By Stéphane Graber
-
Add support for device overrides.
- 240. By Stéphane Graber
-
Add device name to version tarball.
- 239. By Stéphane Graber
-
Fix incorrect path for download cache in core image.
- 238. By Stéphane Graber
-
Add /android/ cache/recovery to core builds for now.
- 237. By Stéphane Graber
-
Skip android bits for non-touch
Preview Diff
1 | === added file '.bzrignore' | |||
2 | --- .bzrignore 1970-01-01 00:00:00 +0000 | |||
3 | +++ .bzrignore 2014-10-10 11:11:17 +0000 | |||
4 | @@ -0,0 +1,9 @@ | |||
5 | 1 | etc/config | ||
6 | 2 | lib/phablet/__pycache__ | ||
7 | 3 | secret/gpg/keyrings/* | ||
8 | 4 | secret/gpg/keys/* | ||
9 | 5 | secret/ssh/* | ||
10 | 6 | tests/coverage | ||
11 | 7 | tests/keys/* | ||
12 | 8 | www/* | ||
13 | 9 | state/* | ||
14 | 0 | 10 | ||
15 | === renamed file '.bzrignore' => '.bzrignore.moved' | |||
16 | === added file 'README' | |||
17 | --- README 1970-01-01 00:00:00 +0000 | |||
18 | +++ README 2014-10-10 11:11:17 +0000 | |||
19 | @@ -0,0 +1,12 @@ | |||
20 | 1 | Runtime dependencies: | ||
21 | 2 | - pxz | xz-utils | ||
22 | 3 | - python3, python3-gpgme | python, python-gpgme | ||
23 | 4 | - e2fsprogs | ||
24 | 5 | - android-tools-fsutils | ||
25 | 6 | - abootimg | ||
26 | 7 | |||
27 | 8 | Test dependencies: | ||
28 | 9 | - python-mock, python3-mock | ||
29 | 10 | - python-coverage, python3-coverage | ||
30 | 11 | - pep8 | ||
31 | 12 | - pyflakes3, pyflakes | ||
32 | 0 | 13 | ||
33 | === added directory 'bin' | |||
34 | === added file 'bin/copy-image' | |||
35 | --- bin/copy-image 1970-01-01 00:00:00 +0000 | |||
36 | +++ bin/copy-image 2014-10-10 11:11:17 +0000 | |||
37 | @@ -0,0 +1,308 @@ | |||
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2013 Canonical Ltd.
# Author: Stéphane Graber <stgraber@ubuntu.com>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Copy a full image from one channel to another.

Takes SOURCE-CHANNEL DESTINATION-CHANNEL DEVICE VERSION on the command
line, re-publishes the full image in the destination channel (with a new
version number unless --keep-version is given), regenerates the version
tarball and any deltas against the destination channel's delta bases,
then expires old images, syncs aliases and triggers a mirror sync.
"""

import json
import os
import sys
sys.path.insert(0, os.path.join(sys.path[0], os.pardir, "lib"))

from systemimage import config, generators, tools, tree

import argparse
import fcntl
import logging

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="image copier")
    parser.add_argument("source_channel", metavar="SOURCE-CHANNEL")
    parser.add_argument("destination_channel", metavar="DESTINATION-CHANNEL")
    parser.add_argument("device", metavar="DEVICE")
    parser.add_argument("version", metavar="VERSION", type=int)
    parser.add_argument("-k", "--keep-version", action="store_true",
                        help="Keep the original version number")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    args = parser.parse_args()

    # Setup logging
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(message)s")

    # -v repeats map to increasingly verbose levels, capped at DEBUG.
    levels = {1: logging.ERROR,
              2: logging.WARNING,
              3: logging.INFO,
              4: logging.DEBUG}

    if args.verbose > 0:
        stdoutlogger = logging.StreamHandler(sys.stdout)
        stdoutlogger.setFormatter(formatter)
        logging.root.setLevel(levels[min(4, args.verbose)])
        logging.root.addHandler(stdoutlogger)
    else:
        logging.root.addHandler(logging.NullHandler())

    # Load the configuration
    conf = config.Config()

    # Try to acquire a global lock; only one tree-mutating tool may run
    # at a time.  A held lock is not an error, just exit quietly.
    lock_file = os.path.join(conf.state_path, "global.lock")
    lock_fd = open(lock_file, 'w')

    try:
        fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError:
        print("Something else holds the global lock. exiting.")
        sys.exit(0)

    # Load the tree
    pub = tree.Tree(conf)

    # Do some checks: both channels must exist, carry the device, and be
    # real channels (not aliases of another channel, not redirects).
    if args.source_channel not in pub.list_channels():
        parser.error("Invalid source channel: %s" % args.source_channel)

    if args.destination_channel not in pub.list_channels():
        parser.error("Invalid destination channel: %s" %
                     args.destination_channel)

    if args.device not in pub.list_channels()[args.source_channel]['devices']:
        parser.error("Invalid device for source channel: %s" %
                     args.device)

    if args.device not in \
            pub.list_channels()[args.destination_channel]['devices']:
        parser.error("Invalid device for destination channel: %s" %
                     args.device)

    if "alias" in pub.list_channels()[args.source_channel] and \
            pub.list_channels()[args.source_channel]['alias'] \
            != args.source_channel:
        parser.error("Source channel is an alias.")

    if "alias" in pub.list_channels()[args.destination_channel] and \
            pub.list_channels()[args.destination_channel]['alias'] \
            != args.destination_channel:
        parser.error("Destination channel is an alias.")

    if "redirect" in pub.list_channels()[args.source_channel]:
        parser.error("Source channel is a redirect.")

    if "redirect" in pub.list_channels()[args.destination_channel]:
        parser.error("Destination channel is a redirect.")

    source_device = pub.get_device(args.source_channel, args.device)
    destination_device = pub.get_device(args.destination_channel, args.device)

    if args.keep_version:
        images = [image for image in destination_device.list_images()
                  if image['version'] == args.version]
        if images:
            parser.error("Version number is already used: %s" % args.version)

    # Assign a new version number
    new_version = args.version
    if not args.keep_version:
        # Find the next available version
        new_version = 1
        for image in destination_device.list_images():
            if image['version'] >= new_version:
                new_version = image['version'] + 1
    logging.debug("Version for next image: %s" % new_version)

    # Extract the build we want to copy
    images = [image for image in source_device.list_images()
              if image['type'] == "full" and image['version'] == args.version]
    if not images:
        parser.error("Can't find version: %s" % args.version)
    source_image = images[0]

    # Extract the list of existing full images
    full_images = {image['version']: image
                   for image in destination_device.list_images()
                   if image['type'] == "full"}

    # Check that the last full and the new image aren't one and the same.
    # The version-N.tar.xz tarball differs by construction, so it is
    # excluded from the comparison on both sides.
    source_files = [entry['path'].split("/")[-1]
                    for entry in source_image['files']
                    if not entry['path'].split("/")[-1].startswith("version-")]
    destination_files = []
    if full_images:
        latest_full = sorted(full_images.values(),
                             key=lambda image: image['version'])[-1]
        destination_files = [entry['path'].split("/")[-1]
                             for entry in latest_full['files']
                             if not entry['path'].split(
                                 "/")[-1].startswith("version-")]
    if source_files == destination_files:
        parser.error("Source image is already latest full in "
                     "destination channel.")

    # Generate a list of required deltas (the latest full image of every
    # configured delta-base channel that is also published here).
    delta_base = []

    if args.destination_channel in conf.channels:
        for base_channel in conf.channels[args.destination_channel].deltabase:
            # Skip missing channels
            if base_channel not in pub.list_channels():
                continue

            # Skip missing devices
            if args.device not in (pub.list_channels()
                                   [base_channel]['devices']):
                continue

            # Extract the latest full image
            base_device = pub.get_device(base_channel, args.device)
            base_images = sorted([image
                                  for image in base_device.list_images()
                                  if image['type'] == "full"],
                                 key=lambda image: image['version'])

            # Check if the version is valid and add it
            if base_images and base_images[-1]['version'] in full_images:
                if (full_images[base_images[-1]['version']]
                        not in delta_base):
                    delta_base.append(full_images
                                      [base_images[-1]['version']])
                    logging.debug("Source version for delta: %s" %
                                  base_images[-1]['version'])

    # Create new empty entries
    new_images = {'full': {'files': []}}
    for delta in delta_base:
        new_images["delta_%s" % delta['version']] = {'files': []}

    # Extract current version_detail and files
    version_detail = ""
    for entry in source_image['files']:
        path = os.path.realpath("%s/%s" % (conf.publish_path, entry['path']))

        filename = path.split("/")[-1]

        # Look for version-X.tar.xz: its sidecar .json carries the
        # version_detail metadata; every other file is copied as-is.
        if filename == "version-%s.tar.xz" % args.version:
            # Extract the metadata
            if os.path.exists(path.replace(".tar.xz", ".json")):
                with open(path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.loads(fd.read())
                if "channel.ini" in metadata:
                    version_detail = metadata['channel.ini'].get(
                        "version_detail", None)
        else:
            new_images['full']['files'].append(path)
    logging.debug("Source version_detail is: %s" % version_detail)

    # Generate new version tarball, dropping any stale "version=" entry
    # from the detail string (a fresh one is generated for new_version).
    environment = {}
    environment['channel_name'] = args.destination_channel
    environment['device'] = destination_device
    environment['device_name'] = args.device
    environment['version'] = new_version
    environment['version_detail'] = [entry
                                     for entry in version_detail.split(",")
                                     if not entry.startswith("version=")]
    environment['new_files'] = new_images['full']['files']

    # Bug fix: the original formatted this as ("," %
    # environment['version_detail']), which raises TypeError; join the
    # detail list with commas as done everywhere else in this script.
    logging.info("Generating new version tarball for '%s' (%s)"
                 % (new_version, ",".join(environment['version_detail'])))
    version_path = generators.generate_file(conf, "version", [], environment)
    if version_path:
        new_images['full']['files'].append(version_path)

    # Generate deltas
    for abspath in new_images['full']['files']:
        prefix = abspath.split("/")[-1].rsplit("-", 1)[0]
        for delta in delta_base:
            # Extract the source (matching file in the base image)
            src_path = None
            for file_dict in delta['files']:
                if (file_dict['path'].split("/")[-1]
                        .startswith(prefix)):
                    src_path = "%s/%s" % (conf.publish_path,
                                          file_dict['path'])
                    break

            # Check that it's not the current file
            if src_path:
                src_path = os.path.realpath(src_path)

                # FIXME: the keyring- is a big hack...
                if src_path == abspath and "keyring-" not in src_path:
                    continue

                # Generators are allowed to return None when no delta
                # exists at all.
                logging.info("Generating delta from '%s' for '%s'" %
                             (delta['version'],
                              prefix))
                delta_path = generators.generate_delta(conf, src_path,
                                                       abspath)
            else:
                # No counterpart in the base image: ship the full file.
                delta_path = abspath

            if not delta_path:
                continue

            # Get the full and relative paths
            delta_abspath, delta_relpath = tools.expand_path(
                delta_path, conf.publish_path)

            new_images['delta_%s' % delta['version']]['files'] \
                .append(delta_abspath)

    # Add full image
    logging.info("Publishing new image '%s' (%s) with %s files."
                 % (new_version, ",".join(environment['version_detail']),
                    len(new_images['full']['files'])))
    destination_device.create_image("full", new_version,
                                    ",".join(environment['version_detail']),
                                    new_images['full']['files'])

    # Add delta images
    for delta in delta_base:
        files = new_images["delta_%s" % delta['version']]['files']
        logging.info("Publishing new delta from '%s' (%s)"
                     " to '%s' (%s) with %s files" %
                     (delta['version'], delta.get("description", ""),
                      new_version, ",".join(environment['version_detail']),
                      len(files)))

        destination_device.create_image(
            "delta", new_version,
            ",".join(environment['version_detail']),
            files,
            base=delta['version'])

    # Expire images beyond the channel's configured retention count
    if args.destination_channel in conf.channels:
        if conf.channels[args.destination_channel].fullcount > 0:
            logging.info("Expiring old images")
            destination_device.expire_images(
                conf.channels[args.destination_channel].fullcount)

    # Sync all channel aliases
    logging.info("Syncing any existing alias")
    pub.sync_aliases(args.destination_channel)

    # Remove any orphaned file
    logging.info("Removing orphaned files from the pool")
    pub.cleanup_tree()

    # Sync the mirrors
    logging.info("Triggering a mirror sync")
    tools.sync_mirrors(conf)
346 | 0 | 309 | ||
347 | === added file 'bin/generate-keyrings' | |||
348 | --- bin/generate-keyrings 1970-01-01 00:00:00 +0000 | |||
349 | +++ bin/generate-keyrings 2014-10-10 11:11:17 +0000 | |||
350 | @@ -0,0 +1,87 @@ | |||
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2013 Canonical Ltd.
# Author: Stéphane Graber <stgraber@ubuntu.com>

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Build the xz-compressed, signed keyring tarballs.

For every keyring whose keys exist under conf.gpg_key_path, generate a
tarball, xz-compress it and sign it with the parent keyring in the trust
chain: archive-master signs itself and image-master; image-master signs
image-signing and blacklist; image-signing signs device-signing.
Keyrings whose key directories (or whose signer's key directory) are
missing are silently skipped.
"""

import os
import sys
import time
sys.path.insert(0, 'lib')

from systemimage import config
from systemimage import gpg
from systemimage import tools

conf = config.Config()

# archive-master keyring (root of trust, self-signed, never expires)
if os.path.exists(os.path.join(conf.gpg_key_path, "archive-master")):
    archive_master = gpg.Keyring(conf, "archive-master")
    archive_master.set_metadata("archive-master")
    archive_master.import_keys(os.path.join(conf.gpg_key_path,
                                            "archive-master"))
    path = archive_master.generate_tarball()
    tools.xz_compress(path)
    os.remove(path)
    gpg.sign_file(conf, "archive-master", "%s.xz" % path)

# image-master keyring (signed by archive-master)
if os.path.exists(os.path.join(conf.gpg_key_path, "image-master")) and \
        os.path.exists(os.path.join(conf.gpg_key_path, "archive-master")):
    image_master = gpg.Keyring(conf, "image-master")
    image_master.set_metadata("image-master")
    image_master.import_keys(os.path.join(conf.gpg_key_path, "image-master"))
    path = image_master.generate_tarball()
    tools.xz_compress(path)
    os.remove(path)
    gpg.sign_file(conf, "archive-master", "%s.xz" % path)

# image-signing keyring (signed by image-master, expires in 2 years)
if os.path.exists(os.path.join(conf.gpg_key_path, "image-signing")) and \
        os.path.exists(os.path.join(conf.gpg_key_path, "image-master")):
    image_signing = gpg.Keyring(conf, "image-signing")
    image_signing.set_metadata("image-signing",
                               int(time.strftime("%s",
                                                 time.localtime())) + 63072000)
    image_signing.import_keys(os.path.join(conf.gpg_key_path, "image-signing"))
    path = image_signing.generate_tarball()
    tools.xz_compress(path)
    os.remove(path)
    gpg.sign_file(conf, "image-master", "%s.xz" % path)

# device-signing keyring (signed by image-signing, expires in 31 days)
if os.path.exists(os.path.join(conf.gpg_key_path, "device-signing")) and \
        os.path.exists(os.path.join(conf.gpg_key_path, "image-signing")):
    device_signing = gpg.Keyring(conf, "device-signing")
    device_signing.set_metadata("device-signing",
                                int(time.strftime("%s",
                                                  time.localtime())) + 2678400)
    device_signing.import_keys(os.path.join(conf.gpg_key_path,
                                            "device-signing"))
    path = device_signing.generate_tarball()
    tools.xz_compress(path)
    os.remove(path)
    gpg.sign_file(conf, "image-signing", "%s.xz" % path)

# blacklist keyring (signed by image-master)
if os.path.exists(os.path.join(conf.gpg_key_path, "blacklist")) and \
        os.path.exists(os.path.join(conf.gpg_key_path, "image-master")):
    blacklist = gpg.Keyring(conf, "blacklist")
    blacklist.set_metadata("blacklist")
    # NOTE(review): the original never imported the blacklist keys even
    # though it gates on their existence above, which would publish an
    # empty blacklist keyring.  Import them like every other keyring —
    # confirm this matches the intended blacklist semantics.
    blacklist.import_keys(os.path.join(conf.gpg_key_path, "blacklist"))
    path = blacklist.generate_tarball()
    tools.xz_compress(path)
    os.remove(path)
    gpg.sign_file(conf, "image-master", "%s.xz" % path)
438 | 0 | 88 | ||
439 | === added file 'bin/generate-keys' | |||
440 | --- bin/generate-keys 1970-01-01 00:00:00 +0000 | |||
441 | +++ bin/generate-keys 2014-10-10 11:11:17 +0000 | |||
442 | @@ -0,0 +1,61 @@ | |||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Canonical Ltd.
# Author: Timothy Chavez <timothy.chavez@canonical.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

"""Generate the GPG signing keys used by the system-image server."""

import argparse
import os
import sys

sys.path.insert(0, 'lib')
from systemimage import config
from systemimage.gpg import generate_signing_key


# Keyring id -> (key name template, expiry).  Master keys never expire
# (0); signing keys expire after two years ("2y").
KEYS = {
    "archive-master": ("{0} Archive Master key", 0),
    "image-master": ("{0} Image Master key", 0),
    "device-signing": ("{0} Device Signing key", "2y"),
    "image-signing": ("{0} Image Signing key", "2y")
}


def main():
    """Generate any signing key whose directory doesn't exist yet.

    Existing key directories are left untouched (with a warning), so the
    script is safe to re-run.
    """
    # Fixed typo in the original description string ("keya").
    parser = argparse.ArgumentParser(description='Generate signing keys.')
    parser.add_argument("--email", dest="email", required=True,
                        help="An email address to associate with the keys")
    parser.add_argument("--prefix", dest="prefix", required=True,
                        help="A prefix to include in the key name")
    args = parser.parse_args()

    conf = config.Config()

    print("I: Generating signing keys...")

    # items() (unlike the original Python-2-only iteritems()) works on
    # both Python 2 and 3; sorted() makes the generation order
    # deterministic.
    for key_id, (key_name, key_expiry) in sorted(KEYS.items()):
        key_path = os.path.join(conf.gpg_key_path, key_id)
        if os.path.exists(key_path):
            print("W: The key \"{0}\" already exists".format(key_id))
            continue
        os.makedirs(key_path)
        generate_signing_key(
            key_path, key_name.format(args.prefix), args.email, key_expiry)

    print("I: Done")


if __name__ == "__main__":
    main()
504 | 0 | 62 | ||
505 | === added file 'bin/import-images' | |||
506 | --- bin/import-images 1970-01-01 00:00:00 +0000 | |||
507 | +++ bin/import-images 2014-10-10 11:11:17 +0000 | |||
508 | @@ -0,0 +1,305 @@ | |||
509 | 1 | #!/usr/bin/python | ||
510 | 2 | # -*- coding: utf-8 -*- | ||
511 | 3 | |||
512 | 4 | # Copyright (C) 2013 Canonical Ltd. | ||
513 | 5 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
514 | 6 | |||
515 | 7 | # This program is free software: you can redistribute it and/or modify | ||
516 | 8 | # it under the terms of the GNU General Public License as published by | ||
517 | 9 | # the Free Software Foundation; version 3 of the License. | ||
518 | 10 | # | ||
519 | 11 | # This program is distributed in the hope that it will be useful, | ||
520 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
521 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
522 | 14 | # GNU General Public License for more details. | ||
523 | 15 | # | ||
524 | 16 | # You should have received a copy of the GNU General Public License | ||
525 | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
526 | 18 | |||
527 | 19 | import os | ||
528 | 20 | import sys | ||
529 | 21 | sys.path.insert(0, os.path.join(sys.path[0], os.pardir, "lib")) | ||
530 | 22 | |||
531 | 23 | from systemimage import config, generators, tools, tree | ||
532 | 24 | |||
533 | 25 | import argparse | ||
534 | 26 | import fcntl | ||
535 | 27 | import logging | ||
536 | 28 | |||
537 | 29 | if __name__ == '__main__': | ||
538 | 30 | parser = argparse.ArgumentParser(description="image importer") | ||
539 | 31 | parser.add_argument("--verbose", "-v", action="count", default=0) | ||
540 | 32 | args = parser.parse_args() | ||
541 | 33 | |||
542 | 34 | # Setup logging | ||
543 | 35 | formatter = logging.Formatter( | ||
544 | 36 | "%(asctime)s %(levelname)s %(message)s") | ||
545 | 37 | |||
546 | 38 | levels = {1: logging.ERROR, | ||
547 | 39 | 2: logging.WARNING, | ||
548 | 40 | 3: logging.INFO, | ||
549 | 41 | 4: logging.DEBUG} | ||
550 | 42 | |||
551 | 43 | if args.verbose > 0: | ||
552 | 44 | stdoutlogger = logging.StreamHandler(sys.stdout) | ||
553 | 45 | stdoutlogger.setFormatter(formatter) | ||
554 | 46 | logging.root.setLevel(levels[min(4, args.verbose)]) | ||
555 | 47 | logging.root.addHandler(stdoutlogger) | ||
556 | 48 | else: | ||
557 | 49 | logging.root.addHandler(logging.NullHandler()) | ||
558 | 50 | |||
559 | 51 | # Load the configuration | ||
560 | 52 | conf = config.Config() | ||
561 | 53 | |||
562 | 54 | # Try to acquire a global lock | ||
563 | 55 | lock_file = os.path.join(conf.state_path, "global.lock") | ||
564 | 56 | lock_fd = open(lock_file, 'w') | ||
565 | 57 | |||
566 | 58 | try: | ||
567 | 59 | fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) | ||
568 | 60 | except IOError: | ||
569 | 61 | logging.info("Something else holds the global lock. exiting.") | ||
570 | 62 | sys.exit(0) | ||
571 | 63 | |||
572 | 64 | # Load the tree | ||
573 | 65 | pub = tree.Tree(conf) | ||
574 | 66 | |||
575 | 67 | # Iterate through the channels | ||
576 | 68 | for channel_name, channel in conf.channels.items(): | ||
577 | 69 | # We're only interested in automated channels | ||
578 | 70 | if channel.type != "auto": | ||
579 | 71 | logging.debug("Skipping non-auto channel: %s" % channel_name) | ||
580 | 72 | continue | ||
581 | 73 | |||
582 | 74 | logging.info("Processing channel: %s" % channel_name) | ||
583 | 75 | |||
584 | 76 | # Check the channel exists | ||
585 | 77 | if channel_name not in pub.list_channels(): | ||
586 | 78 | logging.error("Invalid channel name: %s" % channel_name) | ||
587 | 79 | continue | ||
588 | 80 | |||
589 | 81 | # Iterate through the devices | ||
590 | 82 | for device_name in pub.list_channels()[channel_name]['devices']: | ||
591 | 83 | logging.info("Processing device: %s" % device_name) | ||
592 | 84 | |||
593 | 85 | device = pub.get_device(channel_name, device_name) | ||
594 | 86 | |||
595 | 87 | # Extract last full version | ||
596 | 88 | full_images = {image['version']: image | ||
597 | 89 | for image in device.list_images() | ||
598 | 90 | if image['type'] == "full"} | ||
599 | 91 | |||
600 | 92 | last_full = None | ||
601 | 93 | if full_images: | ||
602 | 94 | last_full = sorted(full_images.values(), | ||
603 | 95 | key=lambda image: image['version'])[-1] | ||
604 | 96 | logging.debug("Last full image: %s" % last_full['version']) | ||
605 | 97 | else: | ||
606 | 98 | logging.debug("This is the first full image.") | ||
607 | 99 | |||
608 | 100 | # Extract all delta base versions | ||
609 | 101 | delta_base = [] | ||
610 | 102 | |||
611 | 103 | for base_channel in channel.deltabase: | ||
612 | 104 | # Skip missing channels | ||
613 | 105 | if base_channel not in pub.list_channels(): | ||
614 | 106 | logging.warn("Invalid base channel: %s" % base_channel) | ||
615 | 107 | continue | ||
616 | 108 | |||
617 | 109 | # Skip missing devices | ||
618 | 110 | if device_name not in (pub.list_channels() | ||
619 | 111 | [base_channel]['devices']): | ||
620 | 112 | logging.warn("Missing device in base channel: %s in %s" % | ||
621 | 113 | (device_name, base_channel)) | ||
622 | 114 | continue | ||
623 | 115 | |||
624 | 116 | # Extract the latest full image | ||
625 | 117 | base_device = pub.get_device(base_channel, device_name) | ||
626 | 118 | base_images = sorted([image | ||
627 | 119 | for image in base_device.list_images() | ||
628 | 120 | if image['type'] == "full"], | ||
629 | 121 | key=lambda image: image['version']) | ||
630 | 122 | |||
631 | 123 | # Check if the version is valid and add it | ||
632 | 124 | if base_images and base_images[-1]['version'] in full_images: | ||
633 | 125 | if (full_images[base_images[-1]['version']] | ||
634 | 126 | not in delta_base): | ||
635 | 127 | delta_base.append(full_images | ||
636 | 128 | [base_images[-1]['version']]) | ||
637 | 129 | logging.debug("Source version for delta: %s" % | ||
638 | 130 | base_images[-1]['version']) | ||
639 | 131 | |||
640 | 132 | # Allocate new version number | ||
641 | 133 | new_version = channel.versionbase | ||
642 | 134 | if last_full: | ||
643 | 135 | new_version = last_full['version'] + 1 | ||
644 | 136 | logging.debug("Version for next image: %s" % new_version) | ||
645 | 137 | |||
646 | 138 | # And the list used to generate version_detail | ||
647 | 139 | version_detail = [] | ||
648 | 140 | |||
649 | 141 | # And a list of new files | ||
650 | 142 | new_files = [] | ||
651 | 143 | |||
652 | 144 | # Keep track of what files we've processed | ||
653 | 145 | processed_files = [] | ||
654 | 146 | |||
655 | 147 | # Create new empty entries | ||
656 | 148 | new_images = {} | ||
657 | 149 | new_images['full'] = {'files': []} | ||
658 | 150 | for delta in delta_base: | ||
659 | 151 | new_images["delta_%s" % delta['version']] = {'files': []} | ||
660 | 152 | |||
661 | 153 | # Iterate through the files | ||
662 | 154 | for file_entry in channel.files: | ||
663 | 155 | # Deal with device specific overrides | ||
664 | 156 | if "," in file_entry['name']: | ||
665 | 157 | file_name, file_device = file_entry['name'].split(',', 1) | ||
666 | 158 | if file_device != device_name: | ||
667 | 159 | logging.debug("Skipping '%s' because the device name" | ||
668 | 160 | "doesn't match" % file_entry['name']) | ||
669 | 161 | continue | ||
670 | 162 | else: | ||
671 | 163 | file_name = file_entry['name'] | ||
672 | 164 | |||
673 | 165 | if file_name in processed_files: | ||
674 | 166 | logging.debug("Skipping '%s' because a more specific" | ||
675 | 167 | "generator was already called." | ||
676 | 168 | % file_entry['name']) | ||
677 | 169 | continue | ||
678 | 170 | |||
679 | 171 | processed_files.append(file_name) | ||
680 | 172 | |||
681 | 173 | # Generate the environment | ||
682 | 174 | environment = {} | ||
683 | 175 | environment['channel_name'] = channel_name | ||
684 | 176 | environment['device'] = device | ||
685 | 177 | environment['device_name'] = device_name | ||
686 | 178 | environment['version'] = new_version | ||
687 | 179 | environment['version_detail'] = version_detail | ||
688 | 180 | environment['new_files'] = new_files | ||
689 | 181 | |||
690 | 182 | # Call file generator | ||
691 | 183 | logging.info("Calling '%s' generator for a new file" | ||
692 | 184 | % file_entry['generator']) | ||
693 | 185 | path = generators.generate_file(conf, | ||
694 | 186 | file_entry['generator'], | ||
695 | 187 | file_entry['arguments'], | ||
696 | 188 | environment) | ||
697 | 189 | |||
698 | 190 | # Generators are allowed to return None when no build | ||
699 | 191 | # exists at all. This cancels the whole image. | ||
700 | 192 | if not path: | ||
701 | 193 | new_files = [] | ||
702 | 194 | logging.info("No image will be produced because the " | ||
703 | 195 | "'%s' generator returned None" % | ||
704 | 196 | file_entry['generator']) | ||
705 | 197 | break | ||
706 | 198 | |||
707 | 199 | # Get the full and relative paths | ||
708 | 200 | abspath, relpath = tools.expand_path(path, conf.publish_path) | ||
709 | 201 | urlpath = "/%s" % "/".join(relpath.split(os.sep)) | ||
710 | 202 | |||
711 | 203 | # FIXME: Extract the prefix, used later for matching between | ||
712 | 204 | # full images. This forces a specific filename format. | ||
713 | 205 | prefix = abspath.split("/")[-1].rsplit("-", 1)[0] | ||
714 | 206 | |||
715 | 207 | # Add the file to the full image | ||
716 | 208 | new_images['full']['files'].append(abspath) | ||
717 | 209 | |||
718 | 210 | # Check if same as current | ||
719 | 211 | new_file = True | ||
720 | 212 | if last_full: | ||
721 | 213 | for file_dict in last_full['files']: | ||
722 | 214 | if file_dict['path'] == urlpath: | ||
723 | 215 | new_file = False | ||
724 | 216 | break | ||
725 | 217 | |||
726 | 218 | if new_file: | ||
727 | 219 | logging.info("New file from '%s': %s" % | ||
728 | 220 | (file_entry['generator'], relpath)) | ||
729 | 221 | new_files.append(abspath) | ||
730 | 222 | else: | ||
731 | 223 | logging.info("File from '%s' is already current" % | ||
732 | 224 | (file_entry['generator'])) | ||
733 | 225 | |||
734 | 226 | # Generate deltas | ||
735 | 227 | for delta in delta_base: | ||
736 | 228 | # Extract the source | ||
737 | 229 | src_path = None | ||
738 | 230 | for file_dict in delta['files']: | ||
739 | 231 | if (file_dict['path'].split("/")[-1] | ||
740 | 232 | .startswith(prefix)): | ||
741 | 233 | src_path = "%s/%s" % (conf.publish_path, | ||
742 | 234 | file_dict['path']) | ||
743 | 235 | break | ||
744 | 236 | |||
745 | 237 | # Check that it's not the current file | ||
746 | 238 | if src_path: | ||
747 | 239 | src_path = os.path.realpath(src_path) | ||
748 | 240 | |||
749 | 241 | # FIXME: the keyring- is a big hack... | ||
750 | 242 | if src_path == abspath and "keyring-" not in src_path: | ||
751 | 243 | continue | ||
752 | 244 | |||
753 | 245 | # Generators are allowed to return None when no delta | ||
754 | 246 | # exists at all. | ||
755 | 247 | logging.info("Generating delta from '%s' for '%s'" % | ||
756 | 248 | (delta['version'], | ||
757 | 249 | file_entry['generator'])) | ||
758 | 250 | delta_path = generators.generate_delta(conf, src_path, | ||
759 | 251 | abspath) | ||
760 | 252 | else: | ||
761 | 253 | delta_path = abspath | ||
762 | 254 | |||
763 | 255 | if not delta_path: | ||
764 | 256 | continue | ||
765 | 257 | |||
766 | 258 | # Get the full and relative paths | ||
767 | 259 | delta_abspath, delta_relpath = tools.expand_path( | ||
768 | 260 | delta_path, conf.publish_path) | ||
769 | 261 | |||
770 | 262 | new_images['delta_%s' % delta['version']]['files'] \ | ||
771 | 263 | .append(delta_abspath) | ||
772 | 264 | |||
773 | 265 | # Check if we've got a new image | ||
774 | 266 | if len(new_files): | ||
775 | 267 | # Publish full image | ||
776 | 268 | logging.info("Publishing new image '%s' (%s) with %s files." | ||
777 | 269 | % (new_version, | ||
778 | 270 | ",".join(environment['version_detail']), | ||
779 | 271 | len(new_images['full']['files']))) | ||
780 | 272 | device.create_image("full", new_version, | ||
781 | 273 | ",".join(environment['version_detail']), | ||
782 | 274 | new_images['full']['files']) | ||
783 | 275 | # Publish deltas | ||
784 | 276 | for delta in delta_base: | ||
785 | 277 | files = new_images["delta_%s" % delta['version']]['files'] | ||
786 | 278 | logging.info("Publishing new delta from '%s' (%s)" | ||
787 | 279 | " to '%s' (%s) with %s files" % | ||
788 | 280 | (delta['version'], | ||
789 | 281 | delta.get("description", ""), | ||
790 | 282 | new_version, | ||
791 | 283 | ",".join(environment['version_detail']), | ||
792 | 284 | len(files))) | ||
793 | 285 | device.create_image( | ||
794 | 286 | "delta", new_version, | ||
795 | 287 | ",".join(environment['version_detail']), files, | ||
796 | 288 | base=delta['version']) | ||
797 | 289 | |||
798 | 290 | # Expire images | ||
799 | 291 | if channel.fullcount > 0: | ||
800 | 292 | logging.info("Expiring old images") | ||
801 | 293 | device.expire_images(channel.fullcount) | ||
802 | 294 | |||
803 | 295 | # Sync all channel aliases | ||
804 | 296 | logging.info("Syncing any existing alias") | ||
805 | 297 | pub.sync_aliases(channel_name) | ||
806 | 298 | |||
807 | 299 | # Remove any orphaned file | ||
808 | 300 | logging.info("Removing orphaned files from the pool") | ||
809 | 301 | pub.cleanup_tree() | ||
810 | 302 | |||
811 | 303 | # Sync the mirrors | ||
812 | 304 | logging.info("Triggering a mirror sync") | ||
813 | 305 | tools.sync_mirrors(conf) | ||
814 | 0 | 306 | ||
815 | === added file 'bin/set-phased-percentage' | |||
816 | --- bin/set-phased-percentage 1970-01-01 00:00:00 +0000 | |||
817 | +++ bin/set-phased-percentage 2014-10-10 11:11:17 +0000 | |||
818 | @@ -0,0 +1,90 @@ | |||
819 | 1 | #!/usr/bin/python | ||
820 | 2 | # -*- coding: utf-8 -*- | ||
821 | 3 | |||
822 | 4 | # Copyright (C) 2013 Canonical Ltd. | ||
823 | 5 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
824 | 6 | |||
825 | 7 | # This program is free software: you can redistribute it and/or modify | ||
826 | 8 | # it under the terms of the GNU General Public License as published by | ||
827 | 9 | # the Free Software Foundation; version 3 of the License. | ||
828 | 10 | # | ||
829 | 11 | # This program is distributed in the hope that it will be useful, | ||
830 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
831 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
832 | 14 | # GNU General Public License for more details. | ||
833 | 15 | # | ||
834 | 16 | # You should have received a copy of the GNU General Public License | ||
835 | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
836 | 18 | |||
837 | 19 | import os | ||
838 | 20 | import sys | ||
839 | 21 | sys.path.insert(0, os.path.join(sys.path[0], os.pardir, "lib")) | ||
840 | 22 | |||
841 | 23 | from systemimage import config, tools, tree | ||
842 | 24 | |||
843 | 25 | import argparse | ||
844 | 26 | import logging | ||
845 | 27 | |||
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="set phased percentage")
    parser.add_argument("channel", metavar="CHANNEL")
    parser.add_argument("device", metavar="DEVICE")
    parser.add_argument("version", metavar="VERSION", type=int)
    parser.add_argument("percentage", metavar="PERCENTAGE", type=int)
    # default=0 is required: with action="count" and no default, omitting
    # -v leaves args.verbose as None and "args.verbose > 0" below raises
    # TypeError on Python 3.  This also matches bin/si-shell.
    parser.add_argument("--verbose", "-v", action="count", default=0)

    args = parser.parse_args()

    # Setup logging
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(message)s")

    # -v .. -vvvv map to ERROR .. DEBUG; anything above 4 is clamped.
    levels = {1: logging.ERROR,
              2: logging.WARNING,
              3: logging.INFO,
              4: logging.DEBUG}

    if args.verbose > 0:
        stdoutlogger = logging.StreamHandler(sys.stdout)
        stdoutlogger.setFormatter(formatter)
        logging.root.setLevel(levels[min(4, args.verbose)])
        logging.root.addHandler(stdoutlogger)
    else:
        # Keep the root logger quiet when not verbose.
        logging.root.addHandler(logging.NullHandler())

    # Load the configuration
    conf = config.Config()

    # Load the tree
    pub = tree.Tree(conf)

    # Do some checks: channel/device must exist, percentage must be in
    # range, and phasing can only be set on a real (non-alias,
    # non-redirect) channel.
    if args.channel not in pub.list_channels():
        parser.error("Invalid channel: %s" % args.channel)

    if args.device not in pub.list_channels()[args.channel]['devices']:
        parser.error("Invalid device for source channel: %s" %
                     args.device)

    if args.percentage < 0 or args.percentage > 100:
        parser.error("Invalid value: %s" % args.percentage)

    if "alias" in pub.list_channels()[args.channel] and \
            pub.list_channels()[args.channel]['alias'] != args.channel:
        parser.error("Channel is an alias.")

    if "redirect" in pub.list_channels()[args.channel]:
        parser.error("Channel is a redirect.")

    dev = pub.get_device(args.channel, args.device)
    logging.info("Setting phased-percentage of '%s' to %s%%" %
                 (args.version, args.percentage))
    dev.set_phased_percentage(args.version, args.percentage)

    # Sync all channel aliases
    logging.info("Syncing any existing alias")
    pub.sync_aliases(args.channel)

    # Sync the mirrors
    logging.info("Triggering a mirror sync")
    tools.sync_mirrors(conf)
909 | 0 | 91 | ||
910 | === added file 'bin/si-shell' | |||
911 | --- bin/si-shell 1970-01-01 00:00:00 +0000 | |||
912 | +++ bin/si-shell 2014-10-10 11:11:17 +0000 | |||
913 | @@ -0,0 +1,79 @@ | |||
914 | 1 | #!/usr/bin/python | ||
915 | 2 | # -*- coding: utf-8 -*- | ||
916 | 3 | |||
917 | 4 | # Copyright (C) 2013 Canonical Ltd. | ||
918 | 5 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
919 | 6 | |||
920 | 7 | # This program is free software: you can redistribute it and/or modify | ||
921 | 8 | # it under the terms of the GNU General Public License as published by | ||
922 | 9 | # the Free Software Foundation; version 3 of the License. | ||
923 | 10 | # | ||
924 | 11 | # This program is distributed in the hope that it will be useful, | ||
925 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
926 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
927 | 14 | # GNU General Public License for more details. | ||
928 | 15 | # | ||
929 | 16 | # You should have received a copy of the GNU General Public License | ||
930 | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
931 | 18 | |||
932 | 19 | import code | ||
933 | 20 | import logging | ||
934 | 21 | import os | ||
935 | 22 | import sys | ||
936 | 23 | sys.path.insert(0, os.path.join(sys.path[0], os.pardir, "lib")) | ||
937 | 24 | |||
938 | 25 | from systemimage import config, tree | ||
939 | 26 | |||
940 | 27 | import argparse | ||
941 | 28 | |||
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="system-image shell")
    parser.add_argument("--verbose", "-v", action="count", default=0)

    args = parser.parse_args()

    # Setup logging
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)s %(message)s")

    # -v .. -vvvv map to ERROR .. DEBUG; anything above 4 is clamped.
    levels = {1: logging.ERROR,
              2: logging.WARNING,
              3: logging.INFO,
              4: logging.DEBUG}

    if args.verbose > 0:
        stdoutlogger = logging.StreamHandler(sys.stdout)
        stdoutlogger.setFormatter(formatter)
        stdoutlogger.setLevel(levels[min(4, args.verbose)])
        logging.root.addHandler(stdoutlogger)
    else:
        # Keep the root logger quiet when not verbose.
        logging.root.addHandler(logging.NullHandler())

    # Load the configuration
    conf = config.Config()

    # Load the tree
    pub = tree.Tree(conf)

    # Start the shell (typo fixed: "availabe" -> "available")
    banner = """Welcome to the system-image shell.
The configuration is available as: conf
The system-image tree is available as: pub
"""

    class CompleterConsole(code.InteractiveConsole):
        """InteractiveConsole pre-loaded with conf/pub and, when the
        readline module is present, tab completion."""

        def __init__(self):
            local = {'conf': conf,
                     'pub': pub}
            code.InteractiveConsole.__init__(self, locals=local)
            try:
                import readline
            except ImportError:
                print('I: readline module not available.')
            else:
                import rlcompleter
                rlcompleter  # Silence pyflakes
                readline.parse_and_bind("tab: complete")

    console = CompleterConsole()
    console.interact(banner)
993 | 0 | 80 | ||
994 | === added directory 'etc' | |||
995 | === added file 'etc/config.example' | |||
996 | --- etc/config.example 1970-01-01 00:00:00 +0000 | |||
997 | +++ etc/config.example 2014-10-10 11:11:17 +0000 | |||
998 | @@ -0,0 +1,48 @@ | |||
999 | 1 | [global] | ||
1000 | 2 | base_path = /some/fs/path | ||
1001 | 3 | channels = trusty, trusty-proposed, trusty-customized | ||
1002 | 4 | gpg_key_path = secret/gpg/keys/ | ||
1003 | 5 | gpg_keyring_path = secret/gpg/keyrings/ | ||
1004 | 6 | publish_path = www/ | ||
1005 | 7 | state_path = state/ | ||
1006 | 8 | mirrors = a, b | ||
1007 | 9 | public_fqdn = system-image.example.net | ||
1008 | 10 | public_http_port = 80 | ||
1009 | 11 | public_https_port = 443 | ||
1010 | 12 | |||
1011 | 13 | [channel_trusty] | ||
1012 | 14 | type = manual | ||
1013 | 15 | versionbase = 1 | ||
1014 | 16 | fullcount = 10 | ||
1015 | 17 | |||
1016 | 18 | [channel_trusty-proposed] | ||
1017 | 19 | type = auto | ||
1018 | 20 | versionbase = 1 | ||
1019 | 21 | fullcount = 20 | ||
1020 | 22 | deltabase = trusty, trusty-proposed | ||
1021 | 23 | files = ubuntu, device, version | ||
1022 | 24 | file_ubuntu = cdimage-ubuntu;daily-preinstalled;trusty,import=any | ||
1023 | 25 | file_device = cdimage-device;daily-preinstalled;trusty,import=any | ||
1024 | 26 | file_version = version | ||
1025 | 27 | |||
1026 | 28 | [channel_trusty-customized] | ||
1027 | 29 | type = auto | ||
1028 | 30 | versionbase = 1 | ||
1029 | 31 | fullcount = 15 | ||
1030 | 32 | files = ubuntu, device, custom, version | ||
1031 | 33 | file_ubuntu = system-image;trusty;file=ubuntu | ||
1032 | 34 | file_device = system-image;trusty;file=device | ||
1033 | 35 | file_custom = http;http://www.example.net/custom/custom.tar.xz;name=custom,monitor=http://www.example.net/custom/build_number | ||
1034 | 36 | file_version = version | ||
1035 | 37 | |||
1036 | 38 | [mirror_default] | ||
1037 | 39 | ssh_user = mirror | ||
1038 | 40 | ssh_key = secret/ssh/mirror | ||
1039 | 41 | ssh_port = 22 | ||
1040 | 42 | ssh_command = sync-mirror | ||
1041 | 43 | |||
1042 | 44 | [mirror_a] | ||
1043 | 45 | ssh_host = a.example.com | ||
1044 | 46 | |||
1045 | 47 | [mirror_b] | ||
1046 | 48 | ssh_host = b.example.com | ||
1047 | 0 | 49 | ||
1048 | === added directory 'lib' | |||
1049 | === added directory 'lib/systemimage' | |||
1050 | === added file 'lib/systemimage/__init__.py' | |||
1051 | === added file 'lib/systemimage/config.py' | |||
1052 | --- lib/systemimage/config.py 1970-01-01 00:00:00 +0000 | |||
1053 | +++ lib/systemimage/config.py 2014-10-10 11:11:17 +0000 | |||
1054 | @@ -0,0 +1,206 @@ | |||
1055 | 1 | # -*- coding: utf-8 -*- | ||
1056 | 2 | |||
1057 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
1058 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
1059 | 5 | |||
1060 | 6 | # This program is free software: you can redistribute it and/or modify | ||
1061 | 7 | # it under the terms of the GNU General Public License as published by | ||
1062 | 8 | # the Free Software Foundation; version 3 of the License. | ||
1063 | 9 | # | ||
1064 | 10 | # This program is distributed in the hope that it will be useful, | ||
1065 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1066 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1067 | 13 | # GNU General Public License for more details. | ||
1068 | 14 | # | ||
1069 | 15 | # You should have received a copy of the GNU General Public License | ||
1070 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
1071 | 17 | |||
1072 | 18 | import os | ||
1073 | 19 | |||
1074 | 20 | try: | ||
1075 | 21 | from configparser import ConfigParser | ||
1076 | 22 | except ImportError: # pragma: no cover | ||
1077 | 23 | from ConfigParser import ConfigParser | ||
1078 | 24 | |||
1079 | 25 | |||
def parse_config(path):
    """
    Parse an INI-style configuration file into a plain nested dict.

    Returns {section: {option: value}} where a value containing ", "
    is split into a list; every value (or list entry) is stripped of
    surrounding double quotes and whitespace.

    Parsing stays best-effort: if the file cannot be parsed, an empty
    dict is returned rather than raising.
    """
    config = {}

    configp = ConfigParser()
    try:
        configp.read(path)
    # Narrowed from a bare "except:": a bare clause would also swallow
    # KeyboardInterrupt/SystemExit.  Parse errors still just yield {}.
    except Exception:
        return config

    for section in configp.sections():
        config_section = {}
        for option in configp.options(section):
            value = configp.get(section, option)
            if ", " in value:
                value = [entry.strip('"').strip()
                         for entry in value.split(", ")]
            else:
                value = value.strip('"').strip()
            config_section[option] = value
        config[section] = config_section

    return config
1102 | 48 | |||
1103 | 49 | |||
class Config:
    """
    Loader for the system-image server configuration.

    Parses an INI-style file (via parse_config) and exposes settings as
    attributes: base_path, gpg_key_path, gpg_keyring_path, publish_path,
    state_path, the optional public_* keys, plus ``mirrors`` and
    ``channels`` dicts of dynamically-created Mirror/Channel objects.
    """

    def __init__(self, path=None):
        # Default lookup: $SYSTEM_IMAGE_ROOT/etc/config (falling back to
        # the current directory), then the in-tree etc/config relative
        # to this module.
        if not path:
            path = "%s/etc/config" % os.environ.get("SYSTEM_IMAGE_ROOT",
                                                    os.getcwd())
            if not os.path.exists(path):
                path = os.path.realpath(os.path.join(os.path.dirname(__file__),
                                                     "../../etc/config"))

        self.load_config(path)

    def load_config(self, path):
        """
        Parse the configuration file at ``path`` and populate attributes.

        Raises Exception when the file is missing, and KeyError when a
        referenced mirror/channel section or a required key is absent.
        """
        if not os.path.exists(path):
            raise Exception("Configuration file doesn't exist: %s" % path)

        # Read the config
        config = parse_config(path)

        if 'global' not in config:
            config['global'] = {}

        # Set defaults.  Relative paths below are anchored at base_path.
        self.base_path = config['global'].get(
            "base_path", os.environ.get("SYSTEM_IMAGE_ROOT", os.getcwd()))

        self.gpg_key_path = config['global'].get(
            "gpg_key_path", os.path.join(self.base_path,
                                         "secret", "gpg", "keys"))
        if not self.gpg_key_path.startswith("/"):
            self.gpg_key_path = os.path.join(self.base_path, self.gpg_key_path)

        self.gpg_keyring_path = config['global'].get(
            "gpg_keyring_path", os.path.join(self.base_path,
                                             "secret", "gpg", "keyrings"))
        if not self.gpg_keyring_path.startswith("/"):
            self.gpg_keyring_path = os.path.join(self.base_path,
                                                 self.gpg_keyring_path)

        self.publish_path = config['global'].get(
            "publish_path", os.path.join(self.base_path, "www"))
        if not self.publish_path.startswith("/"):
            self.publish_path = os.path.join(self.base_path, self.publish_path)

        self.state_path = config['global'].get(
            "state_path", os.path.join(self.base_path, "state"))
        if not self.state_path.startswith("/"):
            self.state_path = os.path.join(self.base_path, self.state_path)

        # Export some more keys as-is (only set when present in the file)
        for key in ("public_fqdn", "public_http_port", "public_https_port"):
            if key not in config['global']:
                continue

            setattr(self, key, config['global'][key])

        # Parse the mirror configuration.  A single mirror name parses as
        # a string, so normalize to a list first.
        self.mirrors = {}
        if "mirrors" in config['global']:
            if not isinstance(config['global']['mirrors'], list):
                config['global']['mirrors'] = [config['global']['mirrors']]

            if len(config['global']['mirrors']) != 0:
                # mirror_default supplies fallbacks for every mirror entry
                # and must define all four ssh_* keys.
                if "mirror_default" not in config:
                    raise KeyError("Missing mirror_default section.")

                for key in ("ssh_user", "ssh_key", "ssh_port", "ssh_command"):
                    if key not in config['mirror_default']:
                        raise KeyError("Missing key in mirror_default: %s" %
                                       key)

                for entry in config['global']['mirrors']:
                    dict_entry = "mirror_%s" % entry
                    if dict_entry not in config:
                        raise KeyError("Missing mirror section: %s" %
                                       dict_entry)

                    # Lightweight attribute holder (one class per mirror).
                    mirror = type("Mirror", (object,), {})

                    # ssh_host is the only per-mirror mandatory key.
                    if "ssh_host" not in config[dict_entry]:
                        raise KeyError("Missing key in %s: ssh_host" %
                                       dict_entry)
                    else:
                        mirror.ssh_host = config[dict_entry]['ssh_host']

                    mirror.ssh_user = config[dict_entry].get(
                        "ssh_user", config['mirror_default']['ssh_user'])
                    mirror.ssh_key = config[dict_entry].get(
                        "ssh_key", config['mirror_default']['ssh_key'])
                    if not mirror.ssh_key.startswith("/"):
                        mirror.ssh_key = os.path.join(self.base_path,
                                                      mirror.ssh_key)
                    mirror.ssh_port = int(config[dict_entry].get(
                        "ssh_port", config['mirror_default']['ssh_port']))
                    mirror.ssh_command = config[dict_entry].get(
                        "ssh_command", config['mirror_default']['ssh_command'])

                    self.mirrors[entry] = mirror

        # Parse the channel configuration (same single-entry
        # normalization as for mirrors).
        self.channels = {}
        if "channels" in config['global']:
            if not isinstance(config['global']['channels'], list):
                config['global']['channels'] = \
                    [config['global']['channels']]

            if len(config['global']['channels']) != 0:
                for entry in config['global']['channels']:
                    dict_entry = "channel_%s" % entry
                    if dict_entry not in config:
                        raise KeyError("Missing channel section: %s" %
                                       dict_entry)

                    # Lightweight attribute holder (one class per channel).
                    channel = type("Channel", (object,), {})

                    channel.versionbase = int(config[dict_entry].get(
                        'versionbase', 1))

                    channel.type = config[dict_entry].get(
                        "type", "manual")

                    channel.fullcount = int(config[dict_entry].get(
                        "fullcount", 0))

                    # deltabase defaults to the channel itself.
                    channel.deltabase = [entry]
                    if "deltabase" in config[dict_entry]:
                        if isinstance(config[dict_entry]["deltabase"],
                                      list):
                            channel.deltabase = \
                                config[dict_entry]["deltabase"]
                        else:
                            channel.deltabase = \
                                [config[dict_entry]["deltabase"]]

                    # Parse the file list: each name must have a matching
                    # "file_<name> = generator;arg1;arg2;..." option.
                    files = config[dict_entry].get("files", [])
                    if isinstance(files, str):
                        files = [files]

                    channel.files = []
                    for file_entry in files:
                        if "file_%s" % file_entry not in config[dict_entry]:
                            raise KeyError("Missing file entry: %s" %
                                           "file_%s" % file_entry)

                        fields = (config[dict_entry]
                                  ["file_%s" % file_entry].split(";"))

                        file_dict = {}
                        file_dict['name'] = file_entry
                        file_dict['generator'] = fields[0]
                        file_dict['arguments'] = []
                        if len(fields) > 1:
                            file_dict['arguments'] = fields[1:]

                        channel.files.append(file_dict)

                    self.channels[entry] = channel
1261 | 0 | 207 | ||
1262 | === added file 'lib/systemimage/diff.py' | |||
1263 | --- lib/systemimage/diff.py 1970-01-01 00:00:00 +0000 | |||
1264 | +++ lib/systemimage/diff.py 2014-10-10 11:11:17 +0000 | |||
1265 | @@ -0,0 +1,242 @@ | |||
1266 | 1 | # -*- coding: utf-8 -*- | ||
1267 | 2 | |||
1268 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
1269 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
1270 | 5 | |||
1271 | 6 | # This program is free software: you can redistribute it and/or modify | ||
1272 | 7 | # it under the terms of the GNU General Public License as published by | ||
1273 | 8 | # the Free Software Foundation; version 3 of the License. | ||
1274 | 9 | # | ||
1275 | 10 | # This program is distributed in the hope that it will be useful, | ||
1276 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1277 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1278 | 13 | # GNU General Public License for more details. | ||
1279 | 14 | # | ||
1280 | 15 | # You should have received a copy of the GNU General Public License | ||
1281 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
1282 | 17 | |||
1283 | 18 | import os | ||
1284 | 19 | import tarfile | ||
1285 | 20 | import time | ||
1286 | 21 | |||
1287 | 22 | from io import BytesIO | ||
1288 | 23 | |||
1289 | 24 | |||
def compare_files(fd_source, fd_target):
    """
    Compare the content of two open file objects.

    Two identical (equal) file objects always compare as matching, and
    a missing (falsy) file object on either side means no match.
    Otherwise both files are read fully and their bytes compared.
    """

    # Same object (or objects comparing equal): trivially a match.
    if fd_source == fd_target:
        return True

    # One side missing entirely: cannot match.
    if not (fd_source and fd_target):
        return False

    return fd_source.read() == fd_target.read()
1306 | 41 | |||
1307 | 42 | |||
def list_tarfile(tarfile):
    """
    Walk through a tarfile and generate a list of the content.

    Returns a (set, dict) tuple.  The set holds (path, kind, fhash)
    tuples and is handy for diffing two tarballs; the dict maps each
    path to its (kind, fhash) pair for direct lookups.  Directories
    carry a None fhash; regular entries hash mode/dev/type/owner/size/
    mtime as strings.
    """

    entries = set()
    details = {}

    for member in tarfile:
        if member.isdir():
            info = ('dir', None)
        else:
            # Stringified metadata tuple acting as a cheap content hash.
            fingerprint = ("%s" % member.mode,
                           "%s" % member.devmajor,
                           "%s" % member.devminor,
                           "%s" % member.type.decode('utf-8'),
                           "%s" % member.uid,
                           "%s" % member.gid,
                           "%s" % member.size,
                           "%s" % member.mtime)
            info = ('file', fingerprint)

        entries.add((member.path,) + info)
        details[member.path] = info

    return (entries, details)
1338 | 73 | |||
1339 | 74 | |||
class ImageDiff:
    """
    Compute the differences between two uncompressed tarballs and
    package them as an incremental update tarball.

    The update tarball contains every added/modified entry plus a
    "removed" file listing paths the client must delete first.
    """

    # Caches filled by scan_content() / compare_images().
    source_content = None
    target_content = None
    diff = None

    def __init__(self, source, target):
        # Both images must be plain (uncompressed) tarballs.
        self.source_file = tarfile.open(source, 'r:')
        self.target_file = tarfile.open(target, 'r:')

    def scan_content(self, image):
        """
        Scan the content of an image and return the image tuple.
        This also caches the content for further use.

        image must be "source" or "target"; raises KeyError otherwise.
        """

        if image not in ("source", "target"):
            raise KeyError("Invalid image '%s'." % image)

        image_file = getattr(self, "%s_file" % image)

        content = list_tarfile(image_file)

        setattr(self, "%s_content" % image, content)
        return content

    def compare_images(self):
        """
        Compare the file listing of two images and return a set.
        This also caches the diff for further use.

        The set contains tuples of (path, changetype) where changetype
        is one of "add", "del" or "mod".
        """
        if not self.source_content:
            self.scan_content("source")

        if not self.target_content:
            self.scan_content("target")

        # Find the changes in the two trees
        changes = set()
        for change in self.source_content[0] \
                .symmetric_difference(self.target_content[0]):
            if change[0] not in self.source_content[1]:
                changetype = "add"
            elif change[0] not in self.target_content[1]:
                changetype = "del"
            else:
                changetype = "mod"
            changes.add((change[0], changetype))

        # Ignore files that only vary in mtime
        # (separate loop to run after de-dupe)
        for change in sorted(changes):
            if change[1] == "mod":
                fstat_source = self.source_content[1][change[0]][1]
                fstat_target = self.target_content[1][change[0]][1]

                # Skip differences between directories and files
                if not fstat_source or not fstat_target:  # pragma: no cover
                    continue

                # Deal with switched hardlinks: same mode/devices, one side
                # is a hardlink ("1"), same uid/gid and same mtime.
                if (fstat_source[0:2] == fstat_target[0:2] and
                        fstat_source[3] != fstat_target[3] and
                        (fstat_source[3] == "1" or
                         fstat_target[3] == "1") and
                        fstat_source[4:5] == fstat_target[4:5] and
                        fstat_source[7] == fstat_target[7]):
                    if compare_files(
                            self.source_file.extractfile(change[0]),
                            self.target_file.extractfile(change[0])):
                        changes.remove(change)
                        continue

                # Deal with regular files: metadata identical except size
                # and/or mtime; drop the change when the payload matches.
                if fstat_source[0:7] == fstat_target[0:7]:
                    source_file = self.source_file.getmember(change[0])
                    target_file = self.target_file.getmember(change[0])

                    if (source_file.linkpath
                            and source_file.linkpath == target_file.linkpath):
                        changes.remove(change)
                        continue

                    if (source_file.isfile() and target_file.isfile()
                            and compare_files(
                                self.source_file.extractfile(change[0]),
                                self.target_file.extractfile(change[0]))):
                        changes.remove(change)
                        continue

        self.diff = changes
        return changes

    def print_changes(self):
        """
        Simply print the list of changes.
        """

        if not self.diff:
            self.compare_images()

        for change in sorted(self.diff):
            print(" - %s (%s)" % (change[0], change[1]))

    def generate_diff_tarball(self, path):
        """
        Generate a tarball containing all files that are
        different between the source and target image as well
        as a file listing all removals.
        """

        if not self.diff:
            self.compare_images()

        output = tarfile.open(path, 'w:')

        # List both deleted files and modified files in the removal list
        # that's needed to allow file type change (e.g. directory to symlink)
        removed_files_list = [entry[0] for entry in self.diff
                              if entry[1] in ("del", "mod")]

        # Encode exactly once.  The previous code formatted the *encoded*
        # bytes with "%s", which on Python 3 embeds the b'...' repr into
        # the removal list, and computed the tar entry size from the str
        # length, which disagrees with the UTF-8 byte count for non-ASCII
        # paths (corrupting the archive).
        removed_files = ("\n".join(removed_files_list) + "\n").encode('utf-8')

        removals = tarfile.TarInfo()
        removals.name = "removed"
        removals.size = len(removed_files)
        removals.mtime = int(time.time())
        removals.uname = "root"
        removals.gname = "root"

        output.addfile(removals, BytesIO(removed_files))

        # Copy all the added and modified
        added = []
        for name, action in sorted(self.diff):
            if action == 'del':
                continue

            if name in added:
                continue

            newfile = self.target_file.getmember(name)
            if newfile.islnk():
                # Hard links: make sure the link target is present in the
                # update tarball *before* the link itself.
                if newfile.linkname.startswith("system/"):
                    targetfile_path = newfile.linkname
                else:
                    targetfile_path = os.path.normpath(os.path.join(
                        os.path.dirname(newfile.name), newfile.linkname))

                targetfile = self.target_file.getmember(targetfile_path)

                if ((targetfile_path, 'add') in self.diff or
                        (targetfile_path, 'mod') in self.diff) and \
                        targetfile_path not in added:
                    fileptr = self.target_file.extractfile(targetfile)
                    output.addfile(targetfile, fileptr)
                    added.append(targetfile.name)

            fileptr = None
            if newfile.isfile():
                fileptr = self.target_file.extractfile(name)
            output.addfile(newfile, fileobj=fileptr)
            added.append(newfile.name)

        output.close()
1508 | 0 | 243 | ||
1509 | === added file 'lib/systemimage/generators.py' | |||
1510 | --- lib/systemimage/generators.py 1970-01-01 00:00:00 +0000 | |||
1511 | +++ lib/systemimage/generators.py 2014-10-10 11:11:17 +0000 | |||
1512 | @@ -0,0 +1,1173 @@ | |||
1513 | 1 | # -*- coding: utf-8 -*- | ||
1514 | 2 | |||
1515 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
1516 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
1517 | 5 | |||
1518 | 6 | # This program is free software: you can redistribute it and/or modify | ||
1519 | 7 | # it under the terms of the GNU General Public License as published by | ||
1520 | 8 | # the Free Software Foundation; version 3 of the License. | ||
1521 | 9 | # | ||
1522 | 10 | # This program is distributed in the hope that it will be useful, | ||
1523 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1524 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1525 | 13 | # GNU General Public License for more details. | ||
1526 | 14 | # | ||
1527 | 15 | # You should have received a copy of the GNU General Public License | ||
1528 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
1529 | 17 | |||
1530 | 18 | from hashlib import sha256 | ||
1531 | 19 | from systemimage import diff, gpg, tree, tools | ||
1532 | 20 | import json | ||
1533 | 21 | import os | ||
1534 | 22 | import socket | ||
1535 | 23 | import shutil | ||
1536 | 24 | import subprocess | ||
1537 | 25 | import tarfile | ||
1538 | 26 | import tempfile | ||
1539 | 27 | import time | ||
1540 | 28 | |||
1541 | 29 | try: | ||
1542 | 30 | from urllib.request import urlopen, urlretrieve | ||
1543 | 31 | except ImportError: # pragma: no cover | ||
1544 | 32 | from urllib import urlopen, urlretrieve | ||
1545 | 33 | |||
# Global cache shared by the generators for the duration of one run.
CACHE = {}
1548 | 36 | |||
1549 | 37 | |||
def root_ownership(tarinfo):
    """
    tarfile add() filter: normalize an entry to mode 0644, root:root
    ownership and the current time as its mtime, then return it.
    """
    tarinfo.mode = 0o644
    # int(time.time()) rather than int(time.strftime("%s", ...)):
    # the "%s" directive is a non-portable glibc extension that Python's
    # strftime() does not guarantee; time.time() yields the same value.
    tarinfo.mtime = int(time.time())
    tarinfo.uname = "root"
    tarinfo.gname = "root"
    return tarinfo
1556 | 44 | |||
1557 | 45 | |||
def unpack_arguments(arguments):
    """
    Parse a string of comma-separated key=value options into a dict.

    Entries that do not contain exactly one "=" are silently skipped.
    """
    pairs = (option.split("=") for option in arguments.split(","))
    return {key: value
            for key, value in (pair for pair in pairs if len(pair) == 2)}
1573 | 61 | |||
1574 | 62 | |||
def generate_delta(conf, source_path, target_path):
    """
    Take two .tar.xz files and generate a third file, stored in the pool.
    The path to the pool file is then returned and <path>.asc is also
    generated using the default signing key.
    """
    source_filename = os.path.basename(source_path).replace(".tar.xz", "")
    target_filename = os.path.basename(target_path).replace(".tar.xz", "")

    # FIXME: This is a bit of a hack, it'd be better not to have to hardcode
    # that kind of stuff...
    # version and keyring tarballs are tiny and not meaningfully diffable,
    # so ship the full target instead of a delta.
    if (source_filename.startswith("version-")
            and target_filename.startswith("version-")):
        return target_path

    if (source_filename.startswith("keyring-")
            and target_filename.startswith("keyring-")):
        return target_path

    # Now for everything else
    path = os.path.realpath(os.path.join(
        conf.publish_path, "pool",
        "%s.delta-%s.tar.xz" % (target_filename, source_filename)))

    # Return pre-existing entries
    if os.path.exists(path):
        return path

    # Create the pool if it doesn't exist
    pool_dir = os.path.join(conf.publish_path, "pool")
    if not os.path.exists(pool_dir):
        os.makedirs(pool_dir)

    # Generate the diff
    tempdir = tempfile.mkdtemp()
    try:
        tools.xz_uncompress(source_path, os.path.join(tempdir, "source.tar"))
        tools.xz_uncompress(target_path, os.path.join(tempdir, "target.tar"))

        imagediff = diff.ImageDiff(os.path.join(tempdir, "source.tar"),
                                   os.path.join(tempdir, "target.tar"))

        imagediff.generate_diff_tarball(os.path.join(tempdir, "output.tar"))
        tools.xz_compress(os.path.join(tempdir, "output.tar"), path)
    finally:
        # Don't leak the scratch directory even when diff generation fails.
        shutil.rmtree(tempdir)

    # Sign the result
    gpg.sign_file(conf, "image-signing", path)

    # Generate the metadata file, embedding the source/target metadata
    # when it is available alongside the input tarballs.
    metadata = {'generator': "delta", 'source': {}, 'target': {}}

    for key, tarball in (("source", source_path), ("target", target_path)):
        json_path = tarball.replace(".tar.xz", ".json")
        if os.path.exists(json_path):
            with open(json_path, "r") as fd:
                metadata[key] = json.load(fd)

    with open(path.replace(".tar.xz", ".json"), "w+") as fd:
        fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                     indent=4, separators=(',', ': ')))
    gpg.sign_file(conf, "image-signing", path.replace(".tar.xz", ".json"))

    return path
1642 | 130 | |||
1643 | 131 | |||
def generate_file(conf, generator, arguments, environment):
    """
    Dispatcher for the various generators and importers.
    It calls the right generator and signs the generated file
    before returning the path.
    """

    # Map generator identifiers to the implementing function names.
    # The lookup is resolved lazily so an unknown generator fails with
    # the usual error before anything else happens.
    handlers = {
        "version": "generate_file_version",
        "cdimage-device": "generate_file_cdimage_device",
        "cdimage-ubuntu": "generate_file_cdimage_ubuntu",
        "cdimage-custom": "generate_file_cdimage_custom",
        "http": "generate_file_http",
        "keyring": "generate_file_keyring",
        "system-image": "generate_file_system_image",
        "remote-system-image": "generate_file_remote_system_image",
    }

    if generator not in handlers:
        raise Exception("Invalid generator: %s" % generator)

    return globals()[handlers[generator]](conf, arguments, environment)
1671 | 159 | |||
1672 | 160 | |||
def generate_file_cdimage_device(conf, arguments, environment):
    """
    Scan a cdimage tree for new device files.

    arguments: [cdimage_path, series, options] where options is an
    optional "key=value,..." string (supported: import=good to only pick
    up images marked as tested).

    Returns the path of the published device tarball in the pool, or
    None when no suitable image is found.
    """

    # We need at least a path and a series
    if len(arguments) < 2:
        return None

    # Read the arguments
    cdimage_path = arguments[0]
    series = arguments[1]

    options = {}
    if len(arguments) > 2:
        options = unpack_arguments(arguments[2])

    # Default to ARM artifacts; x86 devices use matching arches throughout.
    boot_arch = "armhf"
    recovery_arch = "armel"
    system_arch = "armel"
    if environment['device_name'] in ("generic_x86", "generic_i386"):
        boot_arch = "i386"
        recovery_arch = "i386"
        system_arch = "i386"
    elif environment['device_name'] in ("generic_amd64",):
        boot_arch = "amd64"
        recovery_arch = "amd64"
        system_arch = "amd64"

    # Check that the directory exists
    if not os.path.exists(cdimage_path):
        return None

    # Newest first, skipping the "pending"/"current" symlinks.
    versions = sorted([version for version in os.listdir(cdimage_path)
                       if version not in ("pending", "current")],
                      reverse=True)

    for version in versions:
        # Skip directory without checksums
        if not os.path.exists(os.path.join(cdimage_path, version,
                                           "SHA256SUMS")):
            continue

        # Check for all the needed files
        boot_path = os.path.join(cdimage_path, version,
                                 "%s-preinstalled-boot-%s+%s.img" %
                                 (series, boot_arch,
                                  environment['device_name']))
        if not os.path.exists(boot_path):
            continue

        recovery_path = os.path.join(cdimage_path, version,
                                     "%s-preinstalled-recovery-%s+%s.img" %
                                     (series, recovery_arch,
                                      environment['device_name']))
        if not os.path.exists(recovery_path):
            continue

        system_path = os.path.join(cdimage_path, version,
                                   "%s-preinstalled-system-%s+%s.img" %
                                   (series, system_arch,
                                    environment['device_name']))
        if not os.path.exists(system_path):
            continue

        # Check if we should only import tested images
        if options.get("import", "any") == "good":
            if not os.path.exists(os.path.join(cdimage_path, version,
                                               ".marked_good")):
                continue

        # Set the version_detail string
        version_detail = "device=%s" % version

        # Extract the hashes
        boot_hash = None
        recovery_hash = None
        system_hash = None
        with open(os.path.join(cdimage_path, version,
                               "SHA256SUMS"), "r") as fd:
            for line in fd:
                line = line.strip()
                if line.endswith(os.path.basename(boot_path)):
                    boot_hash = line.split()[0]
                elif line.endswith(os.path.basename(recovery_path)):
                    recovery_hash = line.split()[0]
                elif line.endswith(os.path.basename(system_path)):
                    system_hash = line.split()[0]

                if boot_hash and recovery_hash and system_hash:
                    break

        if not boot_hash or not recovery_hash or not system_hash:
            continue

        # The pool filename is keyed on the three image hashes combined.
        hash_string = "%s/%s/%s" % (boot_hash, recovery_hash, system_hash)
        global_hash = sha256(hash_string.encode('utf-8')).hexdigest()

        # Generate the path
        path = os.path.join(conf.publish_path, "pool",
                            "device-%s.tar.xz" % global_hash)

        # Return pre-existing entries
        if os.path.exists(path):
            # Get the real version number (in case it got copied)
            if os.path.exists(path.replace(".tar.xz", ".json")):
                with open(path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.load(fd)

                if "version_detail" in metadata:
                    version_detail = metadata['version_detail']

            environment['version_detail'].append(version_detail)
            return path

        temp_dir = tempfile.mkdtemp()
        try:
            # Generate a new tarball
            target_tarball = tarfile.open(
                os.path.join(temp_dir, "target.tar"), "w:")

            # system image
            # # convert the Android sparse image to a raw image, then
            # # shrink it to its minimal size (both best-effort)
            system_img = os.path.join(temp_dir, "system.img")
            with open(os.path.devnull, "w") as devnull:
                subprocess.call(["simg2img", system_path, system_img],
                                stdout=devnull)
                subprocess.call(["resize2fs", "-M", system_img],
                                stdout=devnull, stderr=devnull)

            # # include in tarball
            target_tarball.add(
                system_img,
                arcname="system/var/lib/lxc/android/system.img",
                filter=root_ownership)

            # boot image
            target_tarball.add(boot_path, arcname="partitions/boot.img",
                               filter=root_ownership)

            # recovery image
            target_tarball.add(recovery_path,
                               arcname="partitions/recovery.img",
                               filter=root_ownership)

            target_tarball.close()

            # Create the pool if it doesn't exist
            pool_dir = os.path.join(conf.publish_path, "pool")
            if not os.path.exists(pool_dir):
                os.makedirs(pool_dir)

            # Compress the target tarball
            tools.xz_compress(os.path.join(temp_dir, "target.tar"), path)
        finally:
            # Clean the scratch space up even when something above fails.
            shutil.rmtree(temp_dir)

        # Sign the published tarball
        gpg.sign_file(conf, "image-signing", path)

        # Generate the metadata file
        metadata = {
            'generator': "cdimage-device",
            'version': version,
            'version_detail': version_detail,
            'series': series,
            'device': environment['device_name'],
            'boot_path': boot_path,
            'boot_checksum': boot_hash,
            'recovery_path': recovery_path,
            'recovery_checksum': recovery_hash,
            'system_path': system_path,
            'system_checksum': system_hash,
        }

        with open(path.replace(".tar.xz", ".json"), "w+") as fd:
            fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                         indent=4, separators=(',', ': ')))
        gpg.sign_file(conf, "image-signing",
                      path.replace(".tar.xz", ".json"))

        environment['version_detail'].append(version_detail)
        return path

    return None
1856 | 344 | |||
1857 | 345 | |||
def generate_file_cdimage_ubuntu(conf, arguments, environment):
    """
    Scan a cdimage tree for new ubuntu files.

    arguments: [cdimage_path, series, options] where options is an
    optional "key=value,..." string (supported: product=<name>,
    import=good).

    Returns the path of the published rootfs tarball in the pool, or
    None when no suitable image is found.
    """

    # We need at least a path and a series
    if len(arguments) < 2:
        return None

    # Read the arguments
    cdimage_path = arguments[0]
    series = arguments[1]

    options = {}
    if len(arguments) > 2:
        options = unpack_arguments(arguments[2])

    arch = "armhf"
    if environment['device_name'] in ("generic_x86", "generic_i386"):
        arch = "i386"
    elif environment['device_name'] in ("generic_amd64",):
        arch = "amd64"

    # Check that the directory exists
    if not os.path.exists(cdimage_path):
        return None

    def _make_entry(name, entry_type, mode, linkname=None):
        """Build a root-owned tar entry stamped with the current time."""
        entry = tarfile.TarInfo()
        entry.type = entry_type
        entry.name = name
        if linkname is not None:
            entry.linkname = linkname
        entry.mode = mode
        entry.mtime = int(time.time())
        entry.uname = "root"
        entry.gname = "root"
        return entry

    # Newest first, skipping the "pending"/"current" symlinks.
    versions = sorted([version for version in os.listdir(cdimage_path)
                       if version not in ("pending", "current")],
                      reverse=True)

    for version in versions:
        # Skip directory without checksums
        if not os.path.exists(os.path.join(cdimage_path, version,
                                           "SHA256SUMS")):
            continue

        # Check for the rootfs
        rootfs_path = os.path.join(cdimage_path, version,
                                   "%s-preinstalled-%s-%s.tar.gz" %
                                   (series, options.get("product", "touch"),
                                    arch))
        if not os.path.exists(rootfs_path):
            continue

        # Check if we should only import tested images
        if options.get("import", "any") == "good":
            if not os.path.exists(os.path.join(cdimage_path, version,
                                               ".marked_good")):
                continue

        # Set the version_detail string
        version_detail = "ubuntu=%s" % version

        # Extract the hash
        rootfs_hash = None
        with open(os.path.join(cdimage_path, version,
                               "SHA256SUMS"), "r") as fd:
            for line in fd:
                line = line.strip()
                if line.endswith(os.path.basename(rootfs_path)):
                    rootfs_hash = line.split()[0]
                    break

        if not rootfs_hash:
            continue

        # Generate the path
        path = os.path.join(conf.publish_path, "pool",
                            "ubuntu-%s.tar.xz" % rootfs_hash)

        # Return pre-existing entries
        if os.path.exists(path):
            # Get the real version number (in case it got copied)
            if os.path.exists(path.replace(".tar.xz", ".json")):
                with open(path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.load(fd)

                if "version_detail" in metadata:
                    version_detail = metadata['version_detail']

            environment['version_detail'].append(version_detail)
            return path

        temp_dir = tempfile.mkdtemp()
        try:
            # Unpack the source tarball
            tools.gzip_uncompress(rootfs_path,
                                  os.path.join(temp_dir, "source.tar"))

            # Generate a new tarball with every path shifted under system/
            source_tarball = tarfile.open(
                os.path.join(temp_dir, "source.tar"), "r:")
            target_tarball = tarfile.open(
                os.path.join(temp_dir, "target.tar"), "w:")

            for entry in source_tarball:
                # FIXME: Will need to be done on the real rootfs
                # Skip some files
                if entry.name in ("SWAP.swap", "etc/mtab"):
                    continue

                fileptr = None
                if entry.isfile():
                    try:
                        fileptr = source_tarball.extractfile(entry.name)
                    except KeyError:  # pragma: no cover
                        pass

                # Update hardlinks to point to the right target
                if entry.islnk():
                    entry.linkname = "system/%s" % entry.linkname

                entry.name = "system/%s" % entry.name
                target_tarball.addfile(entry, fileobj=fileptr)

            if options.get("product", "touch") == "touch":
                # FIXME: Will need to be done on the real rootfs
                # Add some symlinks and directories
                # # /android mount point
                target_tarball.addfile(
                    _make_entry("system/android", tarfile.DIRTYPE, 0o755))

                # # Android partitions
                for android_path in ("cache", "data", "factory", "firmware",
                                     "persist", "system"):
                    target_tarball.addfile(_make_entry(
                        "system/%s" % android_path, tarfile.SYMTYPE, 0o755,
                        linkname="/android/%s" % android_path))

                # # /vendor
                target_tarball.addfile(_make_entry(
                    "system/vendor", tarfile.SYMTYPE, 0o755,
                    linkname="/android/system/vendor"))

                # # /userdata
                target_tarball.addfile(
                    _make_entry("system/userdata", tarfile.DIRTYPE, 0o755))

                # # /etc/mtab -> /proc/mounts
                target_tarball.addfile(_make_entry(
                    "system/etc/mtab", tarfile.SYMTYPE, 0o444,
                    linkname="/proc/mounts"))

                # # /lib/modules
                target_tarball.addfile(
                    _make_entry("system/lib/modules", tarfile.DIRTYPE,
                                0o755))

            source_tarball.close()
            target_tarball.close()

            # Create the pool if it doesn't exist
            pool_dir = os.path.join(conf.publish_path, "pool")
            if not os.path.exists(pool_dir):
                os.makedirs(pool_dir)

            # Compress the target tarball
            tools.xz_compress(os.path.join(temp_dir, "target.tar"), path)
        finally:
            # Clean the scratch space up even when something above fails.
            shutil.rmtree(temp_dir)

        # Sign the published tarball
        gpg.sign_file(conf, "image-signing", path)

        # Generate the metadata file
        metadata = {
            'generator': "cdimage-ubuntu",
            'version': version,
            'version_detail': version_detail,
            'series': series,
            'rootfs_path': rootfs_path,
            'rootfs_checksum': rootfs_hash,
        }

        with open(path.replace(".tar.xz", ".json"), "w+") as fd:
            fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                         indent=4, separators=(',', ': ')))
        gpg.sign_file(conf, "image-signing",
                      path.replace(".tar.xz", ".json"))

        environment['version_detail'].append(version_detail)
        return path

    return None
2076 | 564 | |||
2077 | 565 | |||
def generate_file_cdimage_custom(conf, arguments, environment):
    """
    Scan a cdimage tree for new custom files.

    arguments is [cdimage_path, series, (optional) packed options].
    Supported options: "product" (defaults to "touch") and
    "import=good" to only accept builds marked as tested.

    Returns the path to the published (xz-compressed, signed) custom
    tarball, or None when nothing new/usable was found.
    """

    # We need at least a path and a series
    if len(arguments) < 2:
        return None

    # Read the arguments
    cdimage_path = arguments[0]
    series = arguments[1]

    options = {}
    if len(arguments) > 2:
        options = unpack_arguments(arguments[2])

    # Map the device name to the cdimage architecture
    arch = "armhf"
    if environment['device_name'] in ("generic_x86", "generic_i386"):
        arch = "i386"
    elif environment['device_name'] in ("generic_amd64",):
        arch = "amd64"

    # Check that the directory exists
    if not os.path.exists(cdimage_path):
        return None

    # Newest first; "pending" and "current" are aliases, not builds
    versions = sorted([version for version in os.listdir(cdimage_path)
                       if version not in ("pending", "current")],
                      reverse=True)

    for version in versions:
        # Skip directory without checksums
        if not os.path.exists(os.path.join(cdimage_path, version,
                                           "SHA256SUMS")):
            continue

        # Check for the custom tarball
        custom_path = os.path.join(cdimage_path, version,
                                   "%s-preinstalled-%s-%s.custom.tar.gz" %
                                   (series, options.get("product", "touch"),
                                    arch))
        if not os.path.exists(custom_path):
            continue

        # Check if we should only import tested images
        if options.get("import", "any") == "good":
            if not os.path.exists(os.path.join(cdimage_path, version,
                                               ".marked_good")):
                continue

        # Set the version_detail string
        version_detail = "custom=%s" % version

        # Extract the hash from SHA256SUMS.  Entries look like
        # "<hash> [*]<filename>"; compare the exact filename instead of
        # endswith(), which could match another file sharing the suffix.
        custom_hash = None
        custom_name = os.path.basename(custom_path)
        with open(os.path.join(cdimage_path, version,
                               "SHA256SUMS"), "r") as fd:
            for line in fd:
                fields = line.strip().split()
                if len(fields) >= 2 and \
                        fields[-1].lstrip("*") == custom_name:
                    custom_hash = fields[0]
                    break

        if not custom_hash:
            continue

        # Generate the path
        path = os.path.join(conf.publish_path, "pool",
                            "custom-%s.tar.xz" % custom_hash)

        # Return pre-existing entries
        if os.path.exists(path):
            # Get the real version number (in case it got copied)
            if os.path.exists(path.replace(".tar.xz", ".json")):
                with open(path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.loads(fd.read())

                if "version_detail" in metadata:
                    version_detail = metadata['version_detail']

            environment['version_detail'].append(version_detail)
            return path

        # Repack: gunzip the cdimage tarball, xz-compress it into the
        # pool and sign it.  The temporary directory is removed even
        # when one of the steps fails (it used to be leaked).
        temp_dir = tempfile.mkdtemp()
        try:
            # Unpack the source tarball
            tools.gzip_uncompress(custom_path, os.path.join(temp_dir,
                                                            "source.tar"))

            # Create the pool if it doesn't exist
            if not os.path.exists(os.path.join(conf.publish_path, "pool")):
                os.makedirs(os.path.join(conf.publish_path, "pool"))

            # Compress the target tarball and sign it
            tools.xz_compress(os.path.join(temp_dir, "source.tar"), path)
            gpg.sign_file(conf, "image-signing", path)

            # Generate the metadata file
            metadata = {}
            metadata['generator'] = "cdimage-custom"
            metadata['version'] = version
            metadata['version_detail'] = version_detail
            metadata['series'] = series
            metadata['custom_path'] = custom_path
            metadata['custom_checksum'] = custom_hash

            with open(path.replace(".tar.xz", ".json"), "w+") as fd:
                fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                             indent=4,
                                             separators=(',', ': ')))
            gpg.sign_file(conf, "image-signing",
                          path.replace(".tar.xz", ".json"))
        finally:
            # Cleanup
            shutil.rmtree(temp_dir)

        environment['version_detail'].append(version_detail)
        return path

    return None
2197 | 685 | |||
2198 | 686 | |||
def generate_file_http(conf, arguments, environment):
    """
    Grab, cache and returns a file using http/https.

    arguments is [url, (optional) packed options].  Supported options:
    "monitor" (a URL returning the current build/version number) and
    "name" (the prefix used for the published tarball).

    Returns the path to the published tarball or None on failure.
    """

    # We need at least a URL
    if len(arguments) == 0:
        return None

    # Read the arguments
    url = arguments[0]

    options = {}
    if len(arguments) > 1:
        options = unpack_arguments(arguments[1])

    path = None
    version = None

    # Reuse a version number already resolved during this run
    if "http_%s" % url in CACHE:
        version = CACHE['http_%s' % url]

    # Get the version/build number
    if "monitor" in options or version:
        if not version:
            # Grab the current version number
            old_timeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(5)
            try:
                version = urlopen(options['monitor']).read().strip()
            except (socket.timeout, IOError):
                return None
            finally:
                # Always restore the global socket timeout; it used to
                # be leaked (left at 5s) on the error paths.
                socket.setdefaulttimeout(old_timeout)

            # Validate the version number
            if not version or len(version.split("\n")) > 1:
                return None

            # Push the result in the cache
            CACHE['http_%s' % url] = version

        # Set version_detail
        version_detail = "%s=%s" % (options.get("name", "http"), version)

        # FIXME: can be dropped once all the non-hashed tarballs are gone
        old_path = os.path.realpath(os.path.join(conf.publish_path, "pool",
                                                 "%s-%s.tar.xz" %
                                                 (options.get("name", "http"),
                                                  version)))
        if os.path.exists(old_path):
            # Get the real version number (in case it got copied)
            if os.path.exists(old_path.replace(".tar.xz", ".json")):
                with open(old_path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.loads(fd.read())

                if "version_detail" in metadata:
                    version_detail = metadata['version_detail']

            environment['version_detail'].append(version_detail)
            return old_path

        # Build the path, hashing together the URL and version
        hash_string = "%s:%s" % (url, version)
        global_hash = sha256(hash_string.encode('utf-8')).hexdigest()
        path = os.path.realpath(os.path.join(conf.publish_path, "pool",
                                             "%s-%s.tar.xz" %
                                             (options.get("name", "http"),
                                              global_hash)))

        # Return pre-existing entries
        if os.path.exists(path):
            # Get the real version number (in case it got copied)
            if os.path.exists(path.replace(".tar.xz", ".json")):
                with open(path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.loads(fd.read())

                if "version_detail" in metadata:
                    version_detail = metadata['version_detail']

            environment['version_detail'].append(version_detail)
            return path

    # Grab the real thing
    tempdir = tempfile.mkdtemp()
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(5)
    try:
        urlretrieve(url, os.path.join(tempdir, "download"))
    except (socket.timeout, IOError):
        shutil.rmtree(tempdir)
        return None
    finally:
        # Restore the global socket timeout on every exit path
        socket.setdefaulttimeout(old_timeout)

    # Hash it if we don't have a version number
    if not version:
        # Hash the file
        with open(os.path.join(tempdir, "download"), "rb") as fd:
            version = sha256(fd.read()).hexdigest()

        # Set version_detail
        version_detail = "%s=%s" % (options.get("name", "http"), version)

        # Push the result in the cache
        CACHE['http_%s' % url] = version

        # Build the path
        path = os.path.realpath(os.path.join(conf.publish_path, "pool",
                                             "%s-%s.tar.xz" %
                                             (options.get("name", "http"),
                                              version)))
        # Return pre-existing entries
        if os.path.exists(path):
            # Get the real version number (in case it got copied)
            if os.path.exists(path.replace(".tar.xz", ".json")):
                with open(path.replace(".tar.xz", ".json"), "r") as fd:
                    metadata = json.loads(fd.read())

                if "version_detail" in metadata:
                    version_detail = metadata['version_detail']

            environment['version_detail'].append(version_detail)
            shutil.rmtree(tempdir)
            return path

    # Create the pool if it doesn't exist
    if not os.path.exists(os.path.join(conf.publish_path, "pool")):
        os.makedirs(os.path.join(conf.publish_path, "pool"))

    # Move the file to the pool and sign it
    shutil.move(os.path.join(tempdir, "download"), path)
    gpg.sign_file(conf, "image-signing", path)

    # Generate the metadata file
    metadata = {}
    metadata['generator'] = "http"
    metadata['version'] = version
    metadata['version_detail'] = version_detail
    metadata['url'] = url

    with open(path.replace(".tar.xz", ".json"), "w+") as fd:
        fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                     indent=4, separators=(',', ': ')))
    gpg.sign_file(conf, "image-signing", path.replace(".tar.xz", ".json"))

    # Cleanup
    shutil.rmtree(tempdir)

    environment['version_detail'].append(version_detail)
    return path
2353 | 841 | |||
2354 | 842 | |||
def generate_file_keyring(conf, arguments, environment):
    """
    Generate a keyring tarball or return a pre-existing one.
    """

    # Nothing changed in this image, so no keyring tarball is needed
    if len(environment['new_files']) == 0:
        return None

    # A keyring name is mandatory
    if len(arguments) == 0:
        return None

    keyring_name = arguments[0]
    keyring_path = os.path.join(conf.gpg_keyring_path, keyring_name)

    tarball_src = "%s.tar.xz" % keyring_path
    signature_src = "%s.tar.xz.asc" % keyring_path

    # Both the keyring tarball and its signature must exist
    if not os.path.exists(tarball_src) or \
            not os.path.exists(signature_src):
        return None

    def _file_sha256(file_path):
        # Hash of a file's full content
        with open(file_path, "rb") as fd:
            return sha256(fd.read()).hexdigest()

    # Derive a stable name from both the tarball and signature hashes
    combined = "%s/%s" % (_file_sha256(tarball_src),
                          _file_sha256(signature_src))
    global_hash = sha256(combined.encode('utf-8')).hexdigest()

    path = os.path.realpath(os.path.join(conf.publish_path, "pool",
                                         "keyring-%s.tar.xz" %
                                         global_hash))

    # Record this component in version_detail
    environment['version_detail'].append("keyring=%s" % keyring_name)

    # Reuse an already published tarball
    if os.path.exists(path):
        return path

    workdir = tempfile.mkdtemp()

    # Bundle the keyring and its signature at their install location
    bundle = tarfile.open(os.path.join(workdir, "output.tar"), "w:")
    bundle.add(tarball_src,
               arcname="/system/etc/system-image/archive-master.tar.xz",
               filter=root_ownership)
    bundle.add(signature_src,
               arcname="/system/etc/system-image/archive-master.tar.xz.asc",
               filter=root_ownership)
    bundle.close()

    # Create the pool if it doesn't exist
    pool_dir = os.path.join(conf.publish_path, "pool")
    if not os.path.exists(pool_dir):
        os.makedirs(pool_dir)

    # Compress and sign it
    tools.xz_compress(os.path.join(workdir, "output.tar"), path)
    gpg.sign_file(conf, "image-signing", path)

    # Generate the metadata file
    metadata = {
        'generator': "keyring",
        'version': global_hash,
        'version_detail': "keyring=%s" % keyring_name,
        'path': keyring_path,
    }

    json_path = path.replace(".tar.xz", ".json")
    with open(json_path, "w+") as fd:
        fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                     indent=4, separators=(',', ': ')))
    gpg.sign_file(conf, "image-signing", json_path)

    # Cleanup
    shutil.rmtree(workdir)

    return path
2435 | 923 | |||
2436 | 924 | |||
def generate_file_remote_system_image(conf, arguments, environment):
    """
    Import files from a remote system-image server.

    arguments is [base_url, channel_name, prefix, (optional) packed
    options].  Supported options: "device" (override the device name)
    and "keyring" (repack the recovery keyring of the fetched image).

    Returns the local path of the imported file or None on failure.
    """

    # We need at least a base URL, a channel name and a file prefix
    if len(arguments) < 3:
        return None

    # Read the arguments
    base_url = arguments[0]
    channel_name = arguments[1]
    prefix = arguments[2]

    options = {}
    if len(arguments) > 3:
        options = unpack_arguments(arguments[3])

    device_name = environment['device_name']
    if 'device' in options:
        device_name = options['device']

    # Fetch and validate the remote channels.json
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(5)
    try:
        channel_json = json.loads(urlopen("%s/channels.json" %
                                          base_url).read().decode().strip())
    except (socket.timeout, IOError):
        return None
    finally:
        # Always restore the global socket timeout; it used to be
        # leaked (left at 5s) on the error paths.
        socket.setdefaulttimeout(old_timeout)

    if channel_name not in channel_json:
        return None

    if "devices" not in channel_json[channel_name]:
        return None

    if device_name not in channel_json[channel_name]['devices']:
        return None

    if "index" not in (channel_json[channel_name]['devices']
                       [device_name]):
        return None

    index_url = "%s/%s" % (base_url, channel_json[channel_name]['devices']
                           [device_name]['index'])

    # Fetch and validate the remote index.json
    socket.setdefaulttimeout(5)
    try:
        index_json = json.loads(urlopen(index_url).read().decode())
    except (socket.timeout, IOError):
        return None
    finally:
        socket.setdefaulttimeout(old_timeout)

    # Grab the list of full images
    full_images = sorted([image for image in index_json['images']
                          if image['type'] == "full"],
                         key=lambda image: image['version'])

    # No images
    if not full_images:
        return None

    # Found an image, so let's try to find a match
    for file_entry in full_images[-1]['files']:
        file_name = file_entry['path'].split("/")[-1]
        file_prefix = file_name.rsplit("-", 1)[0]
        if file_prefix == prefix:
            path = os.path.realpath("%s/%s" % (conf.publish_path,
                                               file_entry['path']))
            if os.path.exists(path):
                return path

            # Create the target if needed
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))

            # Grab the file
            file_url = "%s/%s" % (base_url, file_entry['path'])
            socket.setdefaulttimeout(5)
            try:
                urlretrieve(file_url, path)
            except (socket.timeout, IOError):
                if os.path.exists(path):
                    os.remove(path)
                return None
            finally:
                socket.setdefaulttimeout(old_timeout)

            # Optionally repack the recovery partition with our keyring
            if "keyring" in options:
                if not tools.repack_recovery_keyring(conf, path,
                                                     options['keyring']):
                    if os.path.exists(path):
                        os.remove(path)
                    return None

            gpg.sign_file(conf, "image-signing", path)

            # Attempt to grab an associated json (best effort)
            json_path = path.replace(".tar.xz", ".json")
            json_url = file_url.replace(".tar.xz", ".json")
            socket.setdefaulttimeout(5)
            try:
                # NOTE: a stray trailing comma used to turn this call
                # into a one-element tuple expression; dropped.
                urlretrieve(json_url, json_path)
            except (socket.timeout, IOError):
                if os.path.exists(json_path):
                    os.remove(json_path)
            finally:
                socket.setdefaulttimeout(old_timeout)

            if os.path.exists(json_path):
                gpg.sign_file(conf, "image-signing", json_path)
                with open(json_path, "r") as fd:
                    metadata = json.loads(fd.read())

                if "version_detail" in metadata:
                    environment['version_detail'].append(
                        metadata['version_detail'])

            return path

    return None
2571 | 1059 | |||
2572 | 1060 | |||
def generate_file_system_image(conf, arguments, environment):
    """
    Copy a file from another channel.
    """

    # A channel name and a file prefix are both required
    if len(arguments) < 2:
        return None

    channel_name = arguments[0]
    prefix = arguments[1]

    # The source channel must exist and serve this device
    publisher = tree.Tree(conf)
    channels = publisher.list_channels()
    if channel_name not in channels:
        return None

    if environment['device_name'] not in channels[channel_name]['devices']:
        return None

    # Look at the latest full image of the source channel/device
    device = publisher.get_device(channel_name, environment['device_name'])
    full_images = sorted([image for image in device.list_images()
                          if image['type'] == "full"],
                         key=lambda image: image['version'])

    # Nothing published yet
    if not full_images:
        return None

    # Scan the newest image for a file whose name prefix matches
    for file_entry in full_images[-1]['files']:
        file_name = file_entry['path'].split("/")[-1]
        if file_name.rsplit("-", 1)[0] != prefix:
            continue

        path = os.path.realpath("%s/%s" % (conf.publish_path,
                                           file_entry['path']))

        # Propagate the real version_detail from the file's metadata
        json_path = path.replace(".tar.xz", ".json")
        if os.path.exists(json_path):
            with open(json_path, "r") as fd:
                metadata = json.loads(fd.read())

            if "version_detail" in metadata:
                environment['version_detail'].append(
                    metadata['version_detail'])

        return path

    return None
2625 | 1113 | |||
2626 | 1114 | |||
def generate_file_version(conf, arguments, environment):
    """
    Generate a version tarball or return a pre-existing one.
    """

    # Nothing changed in this image, so no version tarball is needed
    if not environment['new_files']:
        return None

    device_path = environment['device'].path
    path = os.path.realpath(os.path.join(
        device_path, "version-%s.tar.xz" % environment['version']))

    # Record this component in version_detail
    environment['version_detail'].append("version=%s" %
                                         environment['version'])

    # Reuse an already generated tarball
    if os.path.exists(path):
        return path

    # Full version_detail string (all components gathered so far)
    version_detail = ",".join(environment['version_detail'])

    workdir = tempfile.mkdtemp()

    # Generate the tarball
    tools.generate_version_tarball(
        conf, environment['channel_name'], environment['device_name'],
        str(environment['version']),
        os.path.join(workdir, "version"), version_detail=version_detail)

    # Create the device directory if it doesn't exist
    if not os.path.exists(device_path):
        os.makedirs(device_path)

    # Compress and sign it
    tools.xz_compress(os.path.join(workdir, "version"), path)
    gpg.sign_file(conf, "image-signing", path)

    # Generate the metadata file
    metadata = {
        'generator': "version",
        'version': environment['version'],
        'version_detail': "version=%s" % environment['version'],
        'channel.ini': {
            'channel': environment['channel_name'],
            'device': environment['device_name'],
            'version': str(environment['version']),
            'version_detail': version_detail,
        },
    }

    json_path = path.replace(".tar.xz", ".json")
    with open(json_path, "w+") as fd:
        fd.write("%s\n" % json.dumps(metadata, sort_keys=True,
                                     indent=4, separators=(',', ': ')))
    gpg.sign_file(conf, "image-signing", json_path)

    # Cleanup
    shutil.rmtree(workdir)

    return path
2686 | 0 | 1174 | ||
2687 | === added file 'lib/systemimage/gpg.py' | |||
2688 | --- lib/systemimage/gpg.py 1970-01-01 00:00:00 +0000 | |||
2689 | +++ lib/systemimage/gpg.py 2014-10-10 11:11:17 +0000 | |||
2690 | @@ -0,0 +1,239 @@ | |||
2691 | 1 | # -*- coding: utf-8 -*- | ||
2692 | 2 | |||
2693 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
2694 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
2695 | 5 | |||
2696 | 6 | # This program is free software: you can redistribute it and/or modify | ||
2697 | 7 | # it under the terms of the GNU General Public License as published by | ||
2698 | 8 | # the Free Software Foundation; version 3 of the License. | ||
2699 | 9 | # | ||
2700 | 10 | # This program is distributed in the hope that it will be useful, | ||
2701 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2702 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2703 | 13 | # GNU General Public License for more details. | ||
2704 | 14 | # | ||
2705 | 15 | # You should have received a copy of the GNU General Public License | ||
2706 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
2707 | 17 | |||
2708 | 18 | import json | ||
2709 | 19 | import gpgme | ||
2710 | 20 | import os | ||
2711 | 21 | import tarfile | ||
2712 | 22 | |||
2713 | 23 | from io import BytesIO | ||
2714 | 24 | |||
2715 | 25 | |||
def generate_signing_key(keyring_path, key_name, key_email, key_expiry):
    """
    Generate a new 2048bit RSA signing key.

    Returns the uid of the newly created key.
    """

    # The target GNUPGHOME must already exist
    if not os.path.isdir(keyring_path):
        raise Exception("Keyring path doesn't exist: %s" % keyring_path)

    # Batch key-generation parameters (sign-only RSA, no passphrase)
    key_params = """<GnupgKeyParms format="internal">
Key-Type: RSA
Key-Length: 2048
Key-Usage: sign
Name-Real: %s
Name-Email: %s
Expire-Date: %s
</GnupgKeyParms>
""" % (key_name, key_email, key_expiry)

    # Point GnuPG at the requested keyring
    os.environ['GNUPGHOME'] = keyring_path

    context = gpgme.Context()
    generated = context.genkey(key_params)
    new_key = context.get_key(generated.fpr, True)
    [uid] = new_key.uids

    return uid
2742 | 52 | |||
2743 | 53 | |||
def sign_file(config, key, path, destination=None, detach=True, armor=True):
    """
    Sign a file and publish the signature.
    The key parameter must be a valid key under config.gpg_key_path.
    The path must be that of a valid file.
    The destination defaults to <path>.gpg (non-armored) or
    <path>.asc (armored).
    The detach and armor parameters respectively control the use of
    detached signatures and base64 armoring.
    """

    key_path = "%s/%s" % (config.gpg_key_path, key)

    # A key is only valid when its keyring directory exists
    if not os.path.isdir(key_path):
        raise IndexError("Invalid GPG key name '%s'." % key)

    if not os.path.isfile(path):
        raise Exception("Invalid path '%s'." % path)

    # Pick the default destination for the requested signature flavour
    if not destination:
        if armor:
            destination = "%s.asc" % path
        elif detach:
            destination = "%s.sig" % path
        else:
            destination = "%s.gpg" % path

    # Never overwrite an existing signature
    if os.path.exists(destination):
        raise Exception("destination already exists.")

    # Point GnuPG at the key's own keyring
    os.environ['GNUPGHOME'] = key_path

    # Create a GPG context, assuming no passphrase
    context = gpgme.Context()
    context.armor = armor
    [signer] = context.keylist()
    context.signers = [signer]

    sig_mode = gpgme.SIG_MODE_DETACH if detach else gpgme.SIG_MODE_NORMAL
    with open(path, "rb") as fd_in, open(destination, "wb+") as fd_out:
        retval = context.sign(fd_in, fd_out, sig_mode)

    return retval
2789 | 99 | |||
2790 | 100 | |||
2791 | 101 | class Keyring: | ||
2792 | 102 | """ | ||
2793 | 103 | Represents a keyring, lets you list/add/remove keys and change | ||
2794 | 104 | some of the keyring properties (type, expiration, target hardware) | ||
2795 | 105 | """ | ||
2796 | 106 | |||
2797 | 107 | keyring_name = None | ||
2798 | 108 | keyring_type = None | ||
2799 | 109 | keyring_expiry = None | ||
2800 | 110 | keyring_model = None | ||
2801 | 111 | keyring_path = None | ||
2802 | 112 | |||
2803 | 113 | def __init__(self, config, keyring_name): | ||
2804 | 114 | keyring_path = "%s/%s" % (config.gpg_keyring_path, keyring_name) | ||
2805 | 115 | |||
2806 | 116 | if not os.path.isdir(keyring_path): | ||
2807 | 117 | os.makedirs(keyring_path) | ||
2808 | 118 | |||
2809 | 119 | self.keyring_name = keyring_name | ||
2810 | 120 | self.keyring_path = keyring_path | ||
2811 | 121 | |||
2812 | 122 | if os.path.exists("%s/keyring.json" % keyring_path): | ||
2813 | 123 | with open("%s/keyring.json" % keyring_path, "r") as fd: | ||
2814 | 124 | keyring_json = json.loads(fd.read()) | ||
2815 | 125 | |||
2816 | 126 | self.keyring_type = keyring_json.get('type', None) | ||
2817 | 127 | self.keyring_expiry = keyring_json.get('expiry', None) | ||
2818 | 128 | self.keyring_model = keyring_json.get('model', None) | ||
2819 | 129 | else: | ||
2820 | 130 | open("%s/pubring.gpg" % keyring_path, "w+").close() | ||
2821 | 131 | |||
2822 | 132 | def generate_tarball(self, destination=None): | ||
2823 | 133 | """ | ||
2824 | 134 | Generate a tarball of the keyring and its json metadata. | ||
2825 | 135 | Returns the path to the tarball. | ||
2826 | 136 | """ | ||
2827 | 137 | |||
2828 | 138 | if not destination: | ||
2829 | 139 | destination = "%s.tar" % self.keyring_path | ||
2830 | 140 | |||
2831 | 141 | if os.path.isfile(destination): | ||
2832 | 142 | os.remove(destination) | ||
2833 | 143 | |||
2834 | 144 | tarball = tarfile.open(destination, "w:") | ||
2835 | 145 | tarball.add("%s/keyring.json" % self.keyring_path, | ||
2836 | 146 | arcname="keyring.json") | ||
2837 | 147 | tarball.add("%s/pubring.gpg" % self.keyring_path, | ||
2838 | 148 | arcname="keyring.gpg") | ||
2839 | 149 | tarball.close() | ||
2840 | 150 | |||
2841 | 151 | return destination | ||
2842 | 152 | |||
2843 | 153 | def set_metadata(self, keyring_type, keyring_expiry=None, | ||
2844 | 154 | keyring_model=None): | ||
2845 | 155 | """ | ||
2846 | 156 | Generate a new keyring.json file. | ||
2847 | 157 | """ | ||
2848 | 158 | |||
2849 | 159 | keyring_json = {} | ||
2850 | 160 | if keyring_type: | ||
2851 | 161 | self.keyring_type = keyring_type | ||
2852 | 162 | keyring_json['type'] = keyring_type | ||
2853 | 163 | |||
2854 | 164 | if keyring_expiry: | ||
2855 | 165 | self.keyring_expiry = keyring_expiry | ||
2856 | 166 | keyring_json['expiry'] = keyring_expiry | ||
2857 | 167 | |||
2858 | 168 | if keyring_model: | ||
2859 | 169 | self.keyring_model = keyring_model | ||
2860 | 170 | keyring_json['model'] = keyring_model | ||
2861 | 171 | |||
2862 | 172 | with open("%s/keyring.json" % self.keyring_path, "w+") as fd: | ||
2863 | 173 | fd.write("%s\n" % json.dumps(keyring_json, sort_keys=True, | ||
2864 | 174 | indent=4, separators=(',', ': '))) | ||
2865 | 175 | |||
2866 | 176 | def list_keys(self): | ||
2867 | 177 | os.environ['GNUPGHOME'] = self.keyring_path | ||
2868 | 178 | |||
2869 | 179 | keys = [] | ||
2870 | 180 | |||
2871 | 181 | ctx = gpgme.Context() | ||
2872 | 182 | for key in ctx.keylist(): | ||
2873 | 183 | keys.append((key.subkeys[0].keyid, key.subkeys[0].length, | ||
2874 | 184 | [uid.uid for uid in key.uids])) | ||
2875 | 185 | |||
2876 | 186 | return keys | ||
2877 | 187 | |||
2878 | 188 | def export_key(self, path, key, armor=True): | ||
2879 | 189 | os.environ['GNUPGHOME'] = self.keyring_path | ||
2880 | 190 | |||
2881 | 191 | ctx = gpgme.Context() | ||
2882 | 192 | ctx.armor = armor | ||
2883 | 193 | |||
2884 | 194 | gpg_key = ctx.get_key(key) | ||
2885 | 195 | |||
2886 | 196 | with open(path, "wb+") as fd: | ||
2887 | 197 | for subkey in gpg_key.subkeys: | ||
2888 | 198 | ctx.export(str(subkey.keyid), fd) | ||
2889 | 199 | |||
2890 | 200 | def import_key(self, path, armor=True): | ||
2891 | 201 | os.environ['GNUPGHOME'] = self.keyring_path | ||
2892 | 202 | |||
2893 | 203 | ctx = gpgme.Context() | ||
2894 | 204 | ctx.armor = armor | ||
2895 | 205 | |||
2896 | 206 | with open(path, "rb") as fd: | ||
2897 | 207 | ctx.import_(fd) | ||
2898 | 208 | |||
2899 | 209 | def import_keys(self, path): | ||
2900 | 210 | """ | ||
2901 | 211 | Import all the keys from the specified keyring. | ||
2902 | 212 | """ | ||
2903 | 213 | |||
2904 | 214 | os.environ['GNUPGHOME'] = path | ||
2905 | 215 | |||
2906 | 216 | ctx = gpgme.Context() | ||
2907 | 217 | |||
2908 | 218 | keys = [] | ||
2909 | 219 | for key in list(ctx.keylist()): | ||
2910 | 220 | for subkey in key.subkeys: | ||
2911 | 221 | content = BytesIO() | ||
2912 | 222 | ctx.export(str(subkey.keyid), content) | ||
2913 | 223 | keys.append(content) | ||
2914 | 224 | |||
2915 | 225 | os.environ['GNUPGHOME'] = self.keyring_path | ||
2916 | 226 | ctx = gpgme.Context() | ||
2917 | 227 | |||
2918 | 228 | for key in keys: | ||
2919 | 229 | key.seek(0) | ||
2920 | 230 | ctx.import_(key) | ||
2921 | 231 | |||
2922 | 232 | def del_key(self, key): | ||
2923 | 233 | os.environ['GNUPGHOME'] = self.keyring_path | ||
2924 | 234 | |||
2925 | 235 | ctx = gpgme.Context() | ||
2926 | 236 | |||
2927 | 237 | gpg_key = ctx.get_key(key) | ||
2928 | 238 | |||
2929 | 239 | ctx.delete(gpg_key) | ||
2930 | 0 | 240 | ||
2931 | === added file 'lib/systemimage/tools.py' | |||
2932 | --- lib/systemimage/tools.py 1970-01-01 00:00:00 +0000 | |||
2933 | +++ lib/systemimage/tools.py 2014-10-10 11:11:17 +0000 | |||
2934 | @@ -0,0 +1,367 @@ | |||
2935 | 1 | # -*- coding: utf-8 -*- | ||
2936 | 2 | |||
2937 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
2938 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
2939 | 5 | |||
2940 | 6 | # This program is free software: you can redistribute it and/or modify | ||
2941 | 7 | # it under the terms of the GNU General Public License as published by | ||
2942 | 8 | # the Free Software Foundation; version 3 of the License. | ||
2943 | 9 | # | ||
2944 | 10 | # This program is distributed in the hope that it will be useful, | ||
2945 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
2946 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
2947 | 13 | # GNU General Public License for more details. | ||
2948 | 14 | # | ||
2949 | 15 | # You should have received a copy of the GNU General Public License | ||
2950 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
2951 | 17 | |||
2952 | 18 | from io import BytesIO | ||
2953 | 19 | |||
2954 | 20 | import gzip | ||
2955 | 21 | import os | ||
2956 | 22 | import re | ||
2957 | 23 | import shutil | ||
2958 | 24 | import subprocess | ||
2959 | 25 | import tarfile | ||
2960 | 26 | import tempfile | ||
2961 | 27 | import time | ||
2962 | 28 | |||
2963 | 29 | |||
def expand_path(path, base="/"):
    """
    Split a path into its absolute and base-relative forms.

    Returns a (abspath, relpath) tuple where relpath is path with the
    base prefix (and at most one leading separator) removed, and
    abspath is the resolved location of relpath under base.
    """

    # Drop the base prefix when present (anchored at the start only).
    if path.startswith(base):
        path = path[len(base):]

    # Strip a single leading separator, if any remains.
    relpath = path[1:] if path.startswith(os.sep) else path

    abspath = os.path.realpath(os.path.join(base, relpath))

    return abspath, relpath
2981 | 47 | |||
2982 | 48 | |||
# Imported from cdimage.osextras
def find_on_path(command):
    """Is command on the executable search path?"""

    search_path = os.environ.get('PATH')
    if not search_path:
        return False

    # A command is found when some non-empty PATH entry contains a
    # regular file by that name with the execute bit set.
    return any(
        os.path.isfile(candidate) and os.access(candidate, os.X_OK)
        for candidate in (os.path.join(directory, command)
                          for directory in search_path.split(os.pathsep)
                          if directory))
2997 | 63 | |||
2998 | 64 | |||
def generate_version_tarball(config, channel, device, version, path,
                             build_path="system/etc/ubuntu-build",
                             channel_path="system/etc/system-image/"
                                          "channel.ini",
                             version_detail=None,
                             channel_target=None):
    """
    Generates a tarball which contains two files
    (build_path and channel_path).
    The first contains the build id, the second a .ini config file.
    The resulting tarball is written at the provided location (path).

    version_detail and channel_target, when provided, are appended as
    extra channel.ini fields.
    """

    tarball = tarfile.open(path, 'w:')

    # Encode before sizing the tar header: for non-ASCII content the
    # UTF-8 byte length differs from len(str), which would corrupt the
    # archive.
    version_data = ("%s\n" % version).encode('utf-8')

    version_file = tarfile.TarInfo()
    version_file.size = len(version_data)
    # int(time.time()) is the portable spelling; strftime("%s") is a
    # glibc-only extension.
    version_file.mtime = int(time.time())
    version_file.name = build_path

    tarball.addfile(version_file, BytesIO(version_data))

    http_port = config.public_http_port
    https_port = config.public_https_port

    # Port 0 means "service disabled" in the config.
    if http_port == 0:
        http_port = "disabled"

    if https_port == 0:
        https_port = "disabled"

    channel = """[service]
base: %s
http_port: %s
https_port: %s
channel: %s
device: %s
build_number: %s
""" % (config.public_fqdn, http_port, https_port,
       channel, device, version.strip())

    if channel_target:
        channel += "channel_target: %s\n" % channel_target

    if version_detail:
        channel += "version_detail: %s\n" % version_detail

    channel_data = channel.encode('utf-8')

    channel_file = tarfile.TarInfo()
    channel_file.size = len(channel_data)
    channel_file.mtime = int(time.time())
    channel_file.name = channel_path

    tarball.addfile(channel_file, BytesIO(channel_data))

    tarball.close()
3057 | 123 | |||
3058 | 124 | |||
def gzip_compress(path, destination=None, level=9):
    """
    Compress a file (path) using gzip.
    By default, creates a .gz version of the file in the same directory.
    An alternate destination path may be provided.
    The compress level is 9 by default but can be overridden.

    Returns the destination path.
    Raises Exception if the destination already exists.
    """

    if not destination:
        destination = "%s.gz" % path

    if os.path.exists(destination):
        raise Exception("destination already exists.")

    # Context managers guarantee both handles are closed even on error;
    # copyfileobj streams in fixed-size chunks instead of splitting the
    # binary input on newline bytes.
    with open(path, "rb") as uncompressed:
        with gzip.open(destination, "wb+", level) as compressed:
            shutil.copyfileobj(uncompressed, compressed)

    return destination
3080 | 146 | |||
3081 | 147 | |||
def gzip_uncompress(path, destination=None):
    """
    Uncompress a file (path) using gzip.
    By default, uses the source path without the .gz prefix as the target.
    An alternate destination path may be provided.

    Returns the destination path.
    Raises Exception if the destination already exists, or if no
    destination was given and path doesn't end in .gz.
    """

    if not destination and path[-3:] != ".gz":
        raise Exception("unspecified destination and path doesn't end"
                        " with .gz")

    if not destination:
        destination = path[:-3]

    if os.path.exists(destination):
        raise Exception("destination already exists.")

    # Context managers guarantee both handles are closed even on error;
    # copyfileobj streams in fixed-size chunks instead of splitting the
    # binary payload on newline bytes.
    with gzip.open(path, "rb") as compressed:
        with open(destination, "wb+") as uncompressed:
            shutil.copyfileobj(compressed, uncompressed)

    return destination
3106 | 172 | |||
3107 | 173 | |||
def xz_compress(path, destination=None, level=9):
    """
    Compress a file (path) using xz.
    By default, creates a .xz version of the file in the same directory.
    An alternate destination path may be provided.
    The compress level is 9 by default but can be overriden.

    Returns the exit status of the xz process.
    """

    # NOTE: Once we can drop support for < 3.3, the new lzma module can be used

    if not destination:
        destination = "%s.xz" % path

    if os.path.exists(destination):
        raise Exception("destination already exists.")

    # Prefer the parallel implementation when it's installed.
    xz_command = "pxz" if find_on_path("pxz") else "xz"

    with open(destination, "wb+") as fd:
        return subprocess.call([xz_command, '-z', '-%s' % level, '-c', path],
                               stdout=fd)
3133 | 199 | |||
3134 | 200 | |||
def xz_uncompress(path, destination=None):
    """
    Uncompress a file (path) using xz.
    By default, uses the source path without the .xz prefix as the target.
    An alternate destination path may be provided.

    Returns the exit status of the xz process.
    """

    # NOTE: Once we can drop support for < 3.3, the new lzma module can be used

    if not destination:
        if not path.endswith(".xz"):
            raise Exception("unspecified destination and path doesn't end"
                            " with .xz")
        destination = path[:-3]

    if os.path.exists(destination):
        raise Exception("destination already exists.")

    with open(destination, "wb+") as fd:
        return subprocess.call(['xz', '-d', '-c', path],
                               stdout=fd)
3159 | 225 | |||
3160 | 226 | |||
def trigger_mirror(host, port, username, key, command):
    """
    Run command on a mirror over ssh, authenticating with the given
    identity file. Returns ssh's exit status.
    """

    ssh_command = [
        'ssh',
        '-i', key,
        '-l', username,
        '-p', str(port),
        host,
        command,
    ]

    return subprocess.call(ssh_command)
3168 | 234 | |||
3169 | 235 | |||
def sync_mirrors(config):
    """
    Trigger the configured sync command on every mirror, in a stable
    order (sorted by ssh host).
    """

    ordered = sorted(config.mirrors.values(),
                     key=lambda entry: entry.ssh_host)
    for mirror in ordered:
        trigger_mirror(mirror.ssh_host, mirror.ssh_port, mirror.ssh_user,
                       mirror.ssh_key, mirror.ssh_command)
3175 | 241 | |||
3176 | 242 | |||
def repack_recovery_keyring(conf, path, keyring_name):
    """
    Rebuild the recovery partition inside the xz-compressed tarball at
    `path` so that its initrd carries the current archive-master
    keyring, then re-compress the tarball in place.

    Returns True on success, False when the tarball has no
    partitions/recovery.img entry.

    NOTE(review): the keyring_name parameter is unused in this body;
    the archive-master keyring is always substituted — confirm intent.
    Relies on the external abootimg, fakeroot, cpio and find tools.
    """

    tempdir = tempfile.mkdtemp()

    # Work on a plain .tar copy of the compressed input.
    xz_uncompress(path, os.path.join(tempdir, "input.tar"))

    input_tarball = tarfile.open(os.path.join(tempdir, "input.tar"), "r:")

    # Make sure the partition is in there
    if "partitions/recovery.img" not in input_tarball.getnames():
        shutil.rmtree(tempdir)
        return False

    input_tarball.extract("partitions/recovery.img", tempdir)

    # Extract the content of the .img (abootimg unpacks into the cwd)
    os.mkdir(os.path.join(tempdir, "img"))
    old_pwd = os.getcwd()
    os.chdir(os.path.join(tempdir, "img"))
    cmd = ["abootimg",
           "-x", os.path.join(tempdir, "partitions", "recovery.img")]

    with open(os.path.devnull, "w") as devnull:
        subprocess.call(cmd, stdout=devnull, stderr=devnull)

    os.chdir(old_pwd)

    # Extract the content of the initrd
    os.mkdir(os.path.join(tempdir, "initrd"))
    state_path = os.path.join(tempdir, "fakeroot_state")
    old_pwd = os.getcwd()
    os.chdir(os.path.join(tempdir, "initrd"))

    gzip_uncompress(os.path.join(tempdir, "img", "initrd.img"),
                    os.path.join(tempdir, "img", "initrd"))

    # Unpack the cpio archive under fakeroot so ownership/permissions
    # are recorded in state_path and can be restored when re-packing.
    with open(os.path.join(tempdir, "img", "initrd"), "rb") as fd:
        with open(os.path.devnull, "w") as devnull:
            subprocess.call(['fakeroot', '-s', state_path, 'cpio', '-i'],
                            stdin=fd, stdout=devnull, stderr=devnull)

    os.chdir(old_pwd)

    # Swap in the current archive-master keyring and its signature
    keyring_path = os.path.join(conf.gpg_keyring_path, "archive-master")

    shutil.copy("%s.tar.xz" % keyring_path,
                os.path.join(tempdir, "initrd", "etc", "system-image",
                             "archive-master.tar.xz"))

    shutil.copy("%s.tar.xz.asc" % keyring_path,
                os.path.join(tempdir, "initrd", "etc", "system-image",
                             "archive-master.tar.xz.asc"))

    # Re-generate the initrd (find | cpio under fakeroot, replaying the
    # ownership recorded in state_path)
    old_pwd = os.getcwd()
    os.chdir(os.path.join(tempdir, "initrd"))

    find = subprocess.Popen(["find", "."], stdout=subprocess.PIPE)
    with open(os.path.join(tempdir, "img", "initrd"), "w+") as fd:
        with open(os.path.devnull, "w") as devnull:
            subprocess.call(['fakeroot', '-i', state_path, 'cpio',
                             '-o', '--format=newc'],
                            stdin=find.stdout,
                            stdout=fd,
                            stderr=devnull)

    os.chdir(old_pwd)

    # Keep the original initrd.img as a backup, then gzip the new one
    # into place.
    os.rename(os.path.join(tempdir, "img", "initrd.img"),
              os.path.join(tempdir, "img", "initrd.img.bak"))
    gzip_compress(os.path.join(tempdir, "img", "initrd"),
                  os.path.join(tempdir, "img", "initrd.img"))

    # Rewrite bootimg.cfg
    content = ""
    with open(os.path.join(tempdir, "img", "bootimg.cfg"), "r") as source:
        for line in source:
            if line.startswith("bootsize"):
                # Force a boot partition size large enough for the
                # grown initrd (0x900000 = 9 MiB).
                line = "bootsize=0x900000\n"
            content += line

    with open(os.path.join(tempdir, "img", "bootimg.cfg"), "w+") as dest:
        dest.write(content)

    # Update the partition image with the rewritten bootimg.cfg
    with open(os.path.devnull, "w") as devnull:
        subprocess.call(['abootimg', '-u',
                         os.path.join(tempdir, "partitions", "recovery.img"),
                         "-f", os.path.join(tempdir, "img", "bootimg.cfg")],
                        stdout=devnull, stderr=devnull)

    # Update the partition image with the re-generated initrd
    with open(os.path.devnull, "w") as devnull:
        subprocess.call(['abootimg', '-u',
                         os.path.join(tempdir, "partitions", "recovery.img"),
                         "-r", os.path.join(tempdir, "img", "initrd.img")],
                        stdout=devnull, stderr=devnull)

    # Generate a new tarball, copying every member from the input but
    # substituting the rebuilt recovery image.
    output_tarball = tarfile.open(os.path.join(tempdir, "output.tar"), "w:")
    for entry in input_tarball:
        fileptr = None
        if entry.isfile():
            try:
                if entry.name == "partitions/recovery.img":
                    with open(os.path.join(tempdir, "partitions",
                                           "recovery.img"), "rb") as fd:
                        fileptr = BytesIO(fd.read())
                        # The tar header size must match the new content.
                        entry.size = os.stat(
                            os.path.join(tempdir, "partitions",
                                         "recovery.img")).st_size
                else:
                    fileptr = input_tarball.extractfile(entry.name)
            except KeyError:  # pragma: no cover
                pass

        output_tarball.addfile(entry, fileobj=fileptr)
    output_tarball.close()

    # Replace the original compressed tarball with the rebuilt one.
    os.remove(path)
    xz_compress(os.path.join(tempdir, "output.tar"), path)

    shutil.rmtree(tempdir)

    return True
3302 | 0 | 368 | ||
3303 | === added file 'lib/systemimage/tree.py' | |||
3304 | --- lib/systemimage/tree.py 1970-01-01 00:00:00 +0000 | |||
3305 | +++ lib/systemimage/tree.py 2014-10-10 11:11:17 +0000 | |||
3306 | @@ -0,0 +1,999 @@ | |||
3307 | 1 | # -*- coding: utf-8 -*- | ||
3308 | 2 | |||
3309 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
3310 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
3311 | 5 | |||
3312 | 6 | # This program is free software: you can redistribute it and/or modify | ||
3313 | 7 | # it under the terms of the GNU General Public License as published by | ||
3314 | 8 | # the Free Software Foundation; version 3 of the License. | ||
3315 | 9 | # | ||
3316 | 10 | # This program is distributed in the hope that it will be useful, | ||
3317 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
3318 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
3319 | 13 | # GNU General Public License for more details. | ||
3320 | 14 | # | ||
3321 | 15 | # You should have received a copy of the GNU General Public License | ||
3322 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
3323 | 17 | |||
3324 | 18 | import copy | ||
3325 | 19 | import json | ||
3326 | 20 | import os | ||
3327 | 21 | import shutil | ||
3328 | 22 | import time | ||
3329 | 23 | |||
3330 | 24 | from contextlib import contextmanager | ||
3331 | 25 | from hashlib import sha256 | ||
3332 | 26 | from systemimage import gpg, tools | ||
3333 | 27 | |||
3334 | 28 | |||
# Context managers
@contextmanager
def channels_json(config, path, commit=False):
    """
    Context function (to be used with "with") yielding the decoded and
    validated content of a channels.json file.

    With commit=True, the file (and its detached signature) is
    rewritten on exit whenever the content changed or the file was
    missing.
    """

    # Missing or empty file: start from an empty dict.
    decoded = {}
    if os.path.exists(path):
        with open(path, "r") as fd:
            raw = fd.read()
            if raw:
                decoded = json.loads(raw)

    if not isinstance(decoded, dict):
        raise TypeError("Invalid channels.json, not a dict.")

    # Keep a snapshot so we can tell whether the caller changed anything.
    snapshot = copy.deepcopy(decoded) if commit else None

    try:
        yield decoded
    finally:
        if commit and (snapshot != decoded or not os.path.exists(path)):
            # Write to a staging file, sign it, then atomically swap
            # first the signature and then the index into place.
            staging = "%s.new" % path
            with open(staging, "w+") as fd:
                fd.write("%s\n" % json.dumps(decoded, sort_keys=True,
                                             indent=4,
                                             separators=(',', ': ')))

            gpg.sign_file(config, "image-signing", staging)
            if os.path.exists("%s.asc" % path):
                os.remove("%s.asc" % path)
            os.rename("%s.asc" % staging, "%s.asc" % path)

            if os.path.exists(path):
                os.remove(path)
            os.rename(staging, path)
3384 | 78 | |||
@contextmanager
def index_json(config, path, commit=False):
    """
    Context function (to be used with "with") yielding the decoded and
    validated content of an index.json file.

    On exit, phased-percentage is stripped from every image except the
    newest one; with commit=True the file (and its detached signature)
    is then rewritten whenever the content changed or the file was
    missing.
    """

    # Missing or empty file: start from an empty skeleton.
    decoded = {'global': {}, 'images': []}

    if os.path.exists(path):
        with open(path, "r") as fd:
            raw = fd.read()
            if raw:
                decoded = json.loads(raw)

    if not isinstance(decoded, dict):
        raise TypeError("Invalid index.json, not a dict.")

    # Keep a snapshot so we can tell whether the caller changed anything.
    snapshot = copy.deepcopy(decoded) if commit else None

    try:
        yield decoded
    finally:
        # Only the newest image may carry a phased-percentage.
        versions = sorted({image['version']
                           for image in decoded['images']})
        if versions:
            newest = versions[-1]

            for image in decoded['images']:
                if (image['version'] != newest and
                        "phased-percentage" in image):
                    image.pop("phased-percentage")

        if commit and (snapshot != decoded or not os.path.exists(path)):
            decoded['global']['generated_at'] = time.strftime(
                "%a %b %d %H:%M:%S UTC %Y", time.gmtime())

            # Write to a staging file, sign it, then atomically swap
            # first the signature and then the index into place.
            staging = "%s.new" % path
            with open(staging, "w+") as fd:
                fd.write("%s\n" % json.dumps(decoded, sort_keys=True,
                                             indent=4,
                                             separators=(',', ': ')))

            gpg.sign_file(config, "image-signing", staging)
            if os.path.exists("%s.asc" % path):
                os.remove("%s.asc" % path)
            os.rename("%s.asc" % staging, "%s.asc" % path)

            if os.path.exists(path):
                os.remove(path)
            os.rename(staging, path)
3452 | 146 | |||
3453 | 147 | class Tree: | ||
3454 | 148 | def __init__(self, config, path=None): | ||
3455 | 149 | if not path: | ||
3456 | 150 | path = config.publish_path | ||
3457 | 151 | |||
3458 | 152 | if not os.path.isdir(path): | ||
3459 | 153 | raise Exception("Invalid path: %s" % path) | ||
3460 | 154 | |||
3461 | 155 | self.config = config | ||
3462 | 156 | self.path = path | ||
3463 | 157 | self.indexpath = os.path.join(path, "channels.json") | ||
3464 | 158 | |||
3465 | 159 | def __list_existing(self): | ||
3466 | 160 | """ | ||
3467 | 161 | Returns a set of all files present in the tree and a set of | ||
3468 | 162 | empty directories that can be removed. | ||
3469 | 163 | """ | ||
3470 | 164 | |||
3471 | 165 | existing_files = set() | ||
3472 | 166 | empty_dirs = set() | ||
3473 | 167 | |||
3474 | 168 | for dirpath, dirnames, filenames in os.walk(self.path): | ||
3475 | 169 | if dirpath == os.path.join(self.path, "gpg"): | ||
3476 | 170 | continue | ||
3477 | 171 | |||
3478 | 172 | if not filenames and not dirnames: | ||
3479 | 173 | empty_dirs.add(dirpath) | ||
3480 | 174 | |||
3481 | 175 | for entry in filenames: | ||
3482 | 176 | existing_files.add(os.path.join(dirpath, entry)) | ||
3483 | 177 | |||
3484 | 178 | return (existing_files, empty_dirs) | ||
3485 | 179 | |||
3486 | 180 | def __list_referenced(self): | ||
3487 | 181 | """ | ||
3488 | 182 | Returns a set of all files that are referenced by the | ||
3489 | 183 | various indexes and should be present in the tree. | ||
3490 | 184 | """ | ||
3491 | 185 | |||
3492 | 186 | listed_files = set() | ||
3493 | 187 | listed_files.add(os.path.join(self.path, "channels.json")) | ||
3494 | 188 | listed_files.add(os.path.join(self.path, "channels.json.asc")) | ||
3495 | 189 | |||
3496 | 190 | for channel, metadata in self.list_channels().items(): | ||
3497 | 191 | devices = metadata['devices'] | ||
3498 | 192 | for device in devices: | ||
3499 | 193 | if 'keyring' in devices[device]: | ||
3500 | 194 | listed_files.add(os.path.join( | ||
3501 | 195 | self.path, devices[device]['keyring']['path'][1:])) | ||
3502 | 196 | listed_files.add(os.path.join( | ||
3503 | 197 | self.path, | ||
3504 | 198 | devices[device]['keyring']['signature'][1:])) | ||
3505 | 199 | |||
3506 | 200 | device_entry = self.get_device(channel, device) | ||
3507 | 201 | |||
3508 | 202 | listed_files.add(os.path.join(device_entry.path, "index.json")) | ||
3509 | 203 | listed_files.add(os.path.join(device_entry.path, | ||
3510 | 204 | "index.json.asc")) | ||
3511 | 205 | |||
3512 | 206 | for image in device_entry.list_images(): | ||
3513 | 207 | for entry in image['files']: | ||
3514 | 208 | listed_files.add(os.path.join(self.path, | ||
3515 | 209 | entry['path'][1:])) | ||
3516 | 210 | listed_files.add(os.path.join(self.path, | ||
3517 | 211 | entry['signature'][1:])) | ||
3518 | 212 | |||
3519 | 213 | return listed_files | ||
3520 | 214 | |||
3521 | 215 | def change_channel_alias(self, channel_name, target_name): | ||
3522 | 216 | """ | ||
3523 | 217 | Change the target of an alias. | ||
3524 | 218 | """ | ||
3525 | 219 | |||
3526 | 220 | with channels_json(self.config, self.indexpath) as channels: | ||
3527 | 221 | if channel_name not in channels: | ||
3528 | 222 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3529 | 223 | |||
3530 | 224 | if "alias" not in channels[channel_name] or \ | ||
3531 | 225 | channels[channel_name]['alias'] == channel_name: | ||
3532 | 226 | raise KeyError("Channel isn't an alias: %s" % channel_name) | ||
3533 | 227 | |||
3534 | 228 | if target_name not in channels: | ||
3535 | 229 | raise KeyError("Couldn't find target channel: %s" % | ||
3536 | 230 | target_name) | ||
3537 | 231 | |||
3538 | 232 | self.remove_channel(channel_name) | ||
3539 | 233 | self.create_channel_alias(channel_name, target_name) | ||
3540 | 234 | |||
3541 | 235 | return True | ||
3542 | 236 | |||
3543 | 237 | def cleanup_tree(self): | ||
3544 | 238 | """ | ||
3545 | 239 | Remove any orphaned file from the tree. | ||
3546 | 240 | """ | ||
3547 | 241 | |||
3548 | 242 | for entry in self.list_orphaned_files(): | ||
3549 | 243 | if os.path.isdir(entry): | ||
3550 | 244 | os.rmdir(entry) | ||
3551 | 245 | else: | ||
3552 | 246 | os.remove(entry) | ||
3553 | 247 | |||
3554 | 248 | return True | ||
3555 | 249 | |||
3556 | 250 | def create_channel(self, channel_name): | ||
3557 | 251 | """ | ||
3558 | 252 | Creates a new channel entry in the tree. | ||
3559 | 253 | """ | ||
3560 | 254 | |||
3561 | 255 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3562 | 256 | if channel_name in channels: | ||
3563 | 257 | raise KeyError("Channel already exists: %s" % channel_name) | ||
3564 | 258 | |||
3565 | 259 | channels[channel_name] = {'devices': {}} | ||
3566 | 260 | |||
3567 | 261 | return True | ||
3568 | 262 | |||
3569 | 263 | def create_channel_alias(self, channel_name, target_name): | ||
3570 | 264 | """ | ||
3571 | 265 | Creates a new channel as an alias for an existing one. | ||
3572 | 266 | """ | ||
3573 | 267 | |||
3574 | 268 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3575 | 269 | if channel_name in channels: | ||
3576 | 270 | raise KeyError("Channel already exists: %s" % channel_name) | ||
3577 | 271 | |||
3578 | 272 | if target_name not in channels: | ||
3579 | 273 | raise KeyError("Couldn't find target channel: %s" % | ||
3580 | 274 | target_name) | ||
3581 | 275 | |||
3582 | 276 | channels[channel_name] = {'devices': {}, | ||
3583 | 277 | 'alias': target_name} | ||
3584 | 278 | |||
3585 | 279 | return self.sync_alias(channel_name) | ||
3586 | 280 | |||
3587 | 281 | def create_channel_redirect(self, channel_name, target_name): | ||
3588 | 282 | """ | ||
3589 | 283 | Creates a new channel redirect. | ||
3590 | 284 | """ | ||
3591 | 285 | |||
3592 | 286 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3593 | 287 | if channel_name in channels: | ||
3594 | 288 | raise KeyError("Channel already exists: %s" % channel_name) | ||
3595 | 289 | |||
3596 | 290 | if target_name not in channels: | ||
3597 | 291 | raise KeyError("Couldn't find target channel: %s" % | ||
3598 | 292 | target_name) | ||
3599 | 293 | |||
3600 | 294 | channels[channel_name] = dict(channels[target_name]) | ||
3601 | 295 | channels[channel_name]['redirect'] = target_name | ||
3602 | 296 | |||
3603 | 297 | self.hide_channel(channel_name) | ||
3604 | 298 | |||
3605 | 299 | return True | ||
3606 | 300 | |||
3607 | 301 | def create_device(self, channel_name, device_name, keyring_path=None): | ||
3608 | 302 | """ | ||
3609 | 303 | Creates a new device entry in the tree. | ||
3610 | 304 | """ | ||
3611 | 305 | |||
3612 | 306 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3613 | 307 | if channel_name not in channels: | ||
3614 | 308 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3615 | 309 | |||
3616 | 310 | if device_name in channels[channel_name]['devices']: | ||
3617 | 311 | raise KeyError("Device already exists: %s" % device_name) | ||
3618 | 312 | |||
3619 | 313 | device_path = os.path.join(self.path, channel_name, device_name) | ||
3620 | 314 | if not os.path.exists(device_path): | ||
3621 | 315 | os.makedirs(device_path) | ||
3622 | 316 | |||
3623 | 317 | # Create an empty index if it doesn't exist, if it does, | ||
3624 | 318 | # just validate it | ||
3625 | 319 | with index_json(self.config, os.path.join(device_path, | ||
3626 | 320 | "index.json"), True): | ||
3627 | 321 | pass | ||
3628 | 322 | |||
3629 | 323 | device = {} | ||
3630 | 324 | device['index'] = "/%s/%s/index.json" % (channel_name, device_name) | ||
3631 | 325 | |||
3632 | 326 | channels[channel_name]['devices'][device_name] = device | ||
3633 | 327 | |||
3634 | 328 | if keyring_path: | ||
3635 | 329 | self.set_device_keyring(channel_name, device_name, keyring_path) | ||
3636 | 330 | |||
3637 | 331 | self.sync_aliases(channel_name) | ||
3638 | 332 | self.sync_redirects(channel_name) | ||
3639 | 333 | |||
3640 | 334 | return True | ||
3641 | 335 | |||
    def generate_index(self, magic=False):
        """
        Re-generate the channels.json file based on the current content of
        the tree.

        This function is only present for emergency purposes and will
        completely rebuild the tree based on what's on the filesystem,
        looking into some well known locations to guess things like device
        keyring paths.

        Call this function with magic="I know what I'm doing" to actually
        trigger it.
        """

        # Deliberate safety latch: the caller must pass the exact phrase.
        if magic != "I know what I'm doing":
            raise Exception("Invalid magic value, please read the help.")

        # Drop the existing channels.json; it is rebuilt from scratch below.
        if os.path.exists(self.indexpath):
            os.remove(self.indexpath)

        # Every top-level directory except 'gpg' is assumed to be a channel.
        for channel_name in [entry for entry in os.listdir(self.path)
                             if os.path.isdir(os.path.join(self.path,
                                                           entry))
                             and entry not in ('gpg',)]:
            self.create_channel(channel_name)

            for device_name in os.listdir(os.path.join(self.path,
                                                       channel_name)):

                # Only subdirectories containing an index.json count as
                # devices.
                path = os.path.join(self.path, channel_name, device_name)
                if not os.path.exists(os.path.join(path, "index.json")):
                    continue

                # A device.tar.xz with a detached signature is taken to be
                # the device keyring.
                keyring_path = os.path.join(path, "device.tar.xz")
                if (os.path.exists(keyring_path)
                        and os.path.exists("%s.asc" % keyring_path)):
                    self.create_device(channel_name, device_name, keyring_path)
                else:
                    self.create_device(channel_name, device_name)

        return True
3683 | 377 | |||
3684 | 378 | def get_device(self, channel_name, device_name): | ||
3685 | 379 | """ | ||
3686 | 380 | Returns a Device instance. | ||
3687 | 381 | """ | ||
3688 | 382 | |||
3689 | 383 | with channels_json(self.config, self.indexpath) as channels: | ||
3690 | 384 | if channel_name not in channels: | ||
3691 | 385 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3692 | 386 | |||
3693 | 387 | if device_name not in channels[channel_name]['devices']: | ||
3694 | 388 | raise KeyError("Couldn't find device: %s" % device_name) | ||
3695 | 389 | |||
3696 | 390 | device_path = os.path.dirname(channels[channel_name]['devices'] | ||
3697 | 391 | [device_name]['index']) | ||
3698 | 392 | |||
3699 | 393 | return Device(self.config, os.path.normpath("%s/%s" % (self.path, | ||
3700 | 394 | device_path))) | ||
3701 | 395 | |||
3702 | 396 | def hide_channel(self, channel_name): | ||
3703 | 397 | """ | ||
3704 | 398 | Hide a channel from the client's list. | ||
3705 | 399 | """ | ||
3706 | 400 | |||
3707 | 401 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3708 | 402 | if channel_name not in channels: | ||
3709 | 403 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3710 | 404 | |||
3711 | 405 | channels[channel_name]['hidden'] = True | ||
3712 | 406 | |||
3713 | 407 | return True | ||
3714 | 408 | |||
3715 | 409 | def list_channels(self): | ||
3716 | 410 | """ | ||
3717 | 411 | Returns a dict of all existing channels and devices for each of | ||
3718 | 412 | those. | ||
3719 | 413 | This is simply a decoded version of channels.json | ||
3720 | 414 | """ | ||
3721 | 415 | |||
3722 | 416 | with channels_json(self.config, self.indexpath) as channels: | ||
3723 | 417 | return channels | ||
3724 | 418 | |||
3725 | 419 | def list_missing_files(self): | ||
3726 | 420 | """ | ||
3727 | 421 | Returns a list of absolute paths that should exist but aren't | ||
3728 | 422 | present on the filesystem. | ||
3729 | 423 | """ | ||
3730 | 424 | |||
3731 | 425 | all_files, empty_dirs = self.__list_existing() | ||
3732 | 426 | referenced_files = self.__list_referenced() | ||
3733 | 427 | |||
3734 | 428 | return sorted(referenced_files - all_files) | ||
3735 | 429 | |||
3736 | 430 | def list_orphaned_files(self): | ||
3737 | 431 | """ | ||
3738 | 432 | Returns a list of absolute paths to files that are present in the | ||
3739 | 433 | tree but aren't referenced anywhere. | ||
3740 | 434 | """ | ||
3741 | 435 | |||
3742 | 436 | orphaned_files = set() | ||
3743 | 437 | |||
3744 | 438 | all_files, empty_dirs = self.__list_existing() | ||
3745 | 439 | referenced_files = self.__list_referenced() | ||
3746 | 440 | |||
3747 | 441 | orphaned_files.update(all_files - referenced_files) | ||
3748 | 442 | orphaned_files.update(empty_dirs) | ||
3749 | 443 | |||
3750 | 444 | for entry in list(orphaned_files): | ||
3751 | 445 | if entry.endswith(".json"): | ||
3752 | 446 | tarname = entry.replace(".json", ".tar.xz") | ||
3753 | 447 | if tarname in referenced_files: | ||
3754 | 448 | orphaned_files.remove(entry) | ||
3755 | 449 | |||
3756 | 450 | if entry.endswith(".json.asc"): | ||
3757 | 451 | tarname = entry.replace(".json.asc", ".tar.xz") | ||
3758 | 452 | if tarname in referenced_files: | ||
3759 | 453 | orphaned_files.remove(entry) | ||
3760 | 454 | |||
3761 | 455 | return sorted(orphaned_files) | ||
3762 | 456 | |||
3763 | 457 | def publish_keyring(self, keyring_name): | ||
3764 | 458 | """ | ||
3765 | 459 | Publish the keyring under gpg/ | ||
3766 | 460 | """ | ||
3767 | 461 | |||
3768 | 462 | gpg_path = os.path.join(self.config.publish_path, "gpg") | ||
3769 | 463 | |||
3770 | 464 | if not os.path.exists(gpg_path): | ||
3771 | 465 | os.mkdir(gpg_path) | ||
3772 | 466 | |||
3773 | 467 | keyring_path = os.path.join(self.config.gpg_keyring_path, keyring_name) | ||
3774 | 468 | |||
3775 | 469 | if not os.path.exists("%s.tar.xz" % keyring_path): | ||
3776 | 470 | raise Exception("Missing keyring: %s.tar.xz" % keyring_path) | ||
3777 | 471 | |||
3778 | 472 | if not os.path.exists("%s.tar.xz.asc" % keyring_path): | ||
3779 | 473 | raise Exception("Missing keyring signature: %s.tar.xz.asc" % | ||
3780 | 474 | keyring_path) | ||
3781 | 475 | |||
3782 | 476 | shutil.copy("%s.tar.xz" % keyring_path, gpg_path) | ||
3783 | 477 | shutil.copy("%s.tar.xz.asc" % keyring_path, gpg_path) | ||
3784 | 478 | |||
3785 | 479 | return True | ||
3786 | 480 | |||
3787 | 481 | def remove_channel(self, channel_name): | ||
3788 | 482 | """ | ||
3789 | 483 | Remove a channel and everything it contains. | ||
3790 | 484 | """ | ||
3791 | 485 | |||
3792 | 486 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3793 | 487 | if channel_name not in channels: | ||
3794 | 488 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3795 | 489 | |||
3796 | 490 | channel_path = os.path.join(self.path, channel_name) | ||
3797 | 491 | if os.path.exists(channel_path) and \ | ||
3798 | 492 | "alias" not in channels[channel_name] and \ | ||
3799 | 493 | "redirect" not in channels[channel_name]: | ||
3800 | 494 | shutil.rmtree(channel_path) | ||
3801 | 495 | channels.pop(channel_name) | ||
3802 | 496 | |||
3803 | 497 | return True | ||
3804 | 498 | |||
3805 | 499 | def remove_device(self, channel_name, device_name): | ||
3806 | 500 | """ | ||
3807 | 501 | Remove a device and everything it contains. | ||
3808 | 502 | """ | ||
3809 | 503 | |||
3810 | 504 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3811 | 505 | if channel_name not in channels: | ||
3812 | 506 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3813 | 507 | |||
3814 | 508 | if device_name not in channels[channel_name]['devices']: | ||
3815 | 509 | raise KeyError("Couldn't find device: %s" % device_name) | ||
3816 | 510 | |||
3817 | 511 | device_path = os.path.join(self.path, channel_name, device_name) | ||
3818 | 512 | if os.path.exists(device_path): | ||
3819 | 513 | shutil.rmtree(device_path) | ||
3820 | 514 | channels[channel_name]['devices'].pop(device_name) | ||
3821 | 515 | |||
3822 | 516 | self.sync_aliases(channel_name) | ||
3823 | 517 | self.sync_redirects(channel_name) | ||
3824 | 518 | |||
3825 | 519 | return True | ||
3826 | 520 | |||
    def rename_channel(self, old_name, new_name):
        """
        Rename a channel.

        For regular channels this moves the on-disk directory and rewrites
        every device index entry so the published paths point at the new
        name.  Redirect channels carry no files, so only the channels.json
        entry is renamed.
        """

        with channels_json(self.config, self.indexpath, True) as channels:
            if old_name not in channels:
                raise KeyError("Couldn't find channel: %s" % old_name)

            if new_name in channels:
                raise KeyError("Channel already exists: %s" % new_name)

            old_channel_path = os.path.join(self.path, old_name)
            new_channel_path = os.path.join(self.path, new_name)
            # Redirects have no directory of their own; skip the move.
            if "redirect" not in channels[old_name]:
                if os.path.exists(new_channel_path):
                    raise Exception("Channel path already exists: %s" %
                                    new_channel_path)

                # Nested channel names ("a/b") need the parent directory.
                if not os.path.exists(os.path.dirname(new_channel_path)):
                    os.makedirs(os.path.dirname(new_channel_path))
                if os.path.exists(old_channel_path):
                    os.rename(old_channel_path, new_channel_path)

            channels[new_name] = dict(channels[old_name])

            if "redirect" not in channels[new_name]:
                for device_name in channels[new_name]['devices']:
                    # Point the device index at the new channel name...
                    index_path = "/%s/%s/index.json" % (new_name, device_name)
                    channels[new_name]['devices'][device_name]['index'] = \
                        index_path

                    # ...and rewrite every file/signature path inside it.
                    with index_json(self.config, "%s/%s" %
                                    (self.path, index_path), True) as index:
                        for image in index['images']:
                            for entry in image['files']:
                                entry['path'] = entry['path'] \
                                    .replace("/%s/" % old_name,
                                             "/%s/" % new_name)
                                entry['signature'] = entry['signature'] \
                                    .replace("/%s/" % old_name,
                                             "/%s/" % new_name)

            channels.pop(old_name)

        return True
3873 | 567 | |||
3874 | 568 | def show_channel(self, channel_name): | ||
3875 | 569 | """ | ||
3876 | 570 | Show a channel from the client's list. | ||
3877 | 571 | """ | ||
3878 | 572 | |||
3879 | 573 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3880 | 574 | if channel_name not in channels: | ||
3881 | 575 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3882 | 576 | |||
3883 | 577 | if "hidden" in channels[channel_name]: | ||
3884 | 578 | channels[channel_name].pop("hidden") | ||
3885 | 579 | |||
3886 | 580 | return True | ||
3887 | 581 | |||
3888 | 582 | def set_device_keyring(self, channel_name, device_name, path): | ||
3889 | 583 | """ | ||
3890 | 584 | Update the keyring entry for the given channel and device. | ||
3891 | 585 | Passing None as the path will unset any existing value. | ||
3892 | 586 | """ | ||
3893 | 587 | |||
3894 | 588 | with channels_json(self.config, self.indexpath, True) as channels: | ||
3895 | 589 | if channel_name not in channels: | ||
3896 | 590 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
3897 | 591 | |||
3898 | 592 | if device_name not in channels[channel_name]['devices']: | ||
3899 | 593 | raise KeyError("Couldn't find device: %s" % device_name) | ||
3900 | 594 | |||
3901 | 595 | abspath, relpath = tools.expand_path(path, self.path) | ||
3902 | 596 | |||
3903 | 597 | if not os.path.exists(abspath): | ||
3904 | 598 | raise Exception("Specified GPG keyring doesn't exists: %s" % | ||
3905 | 599 | abspath) | ||
3906 | 600 | |||
3907 | 601 | if not os.path.exists("%s.asc" % abspath): | ||
3908 | 602 | raise Exception("The GPG keyring signature doesn't exists: " | ||
3909 | 603 | "%s.asc" % abspath) | ||
3910 | 604 | |||
3911 | 605 | keyring = {} | ||
3912 | 606 | keyring['path'] = "/%s" % "/".join(relpath.split(os.sep)) | ||
3913 | 607 | keyring['signature'] = "/%s.asc" % "/".join(relpath.split(os.sep)) | ||
3914 | 608 | |||
3915 | 609 | channels[channel_name]['devices'][device_name]['keyring'] = keyring | ||
3916 | 610 | |||
3917 | 611 | return True | ||
3918 | 612 | |||
    def sync_alias(self, channel_name):
        """
        Update a channel with data from its parent.

        Brings an alias channel in line with its target: devices are
        added/removed to match, images are copied over (with a freshly
        generated, signed version tarball naming the alias channel), and
        the phased percentage of the newest version is mirrored.

        Raises KeyError for unknown channels and TypeError when the
        channel isn't an alias.
        """

        with channels_json(self.config, self.indexpath) as channels:
            if channel_name not in channels:
                raise KeyError("Couldn't find channel: %s" % channel_name)

            if "alias" not in channels[channel_name] or \
                    channels[channel_name]['alias'] == channel_name:
                raise TypeError("Not a channel alias")

            target_name = channels[channel_name]['alias']

            if target_name not in channels:
                raise KeyError("Couldn't find target channel: %s" %
                               target_name)

            # Start by looking for added/removed devices
            devices = set(channels[channel_name]['devices'].keys())
            target_devices = set(channels[target_name]['devices'].keys())

        # Remove any removed device
        for device in devices - target_devices:
            self.remove_device(channel_name, device)

        # Add any missing device
        for device in target_devices - devices:
            self.create_device(channel_name, device)

        # Iterate through all the devices to import builds
        for device_name in target_devices:
            device = self.get_device(channel_name, device_name)
            target_device = self.get_device(target_name, device_name)

            # Extract all the current builds, keyed on (version, base,
            # type) so fulls and deltas are compared independently.
            device_images = {(image['version'], image.get('base', None),
                              image['type'])
                             for image in device.list_images()}

            target_images = {(image['version'], image.get('base', None),
                              image['type'])
                             for image in target_device.list_images()}

            # Remove any removed image
            for image in device_images - target_images:
                device.remove_image(image[2], image[0], base=image[1])

            # Create the path if it doesn't exist
            if not os.path.exists(device.path):
                os.makedirs(device.path)

            # Add any missing image
            with index_json(self.config, device.indexpath, True) as index:
                for image in sorted(target_images - device_images):
                    orig = [entry for entry in target_device.list_images()
                            if entry['type'] == image[2] and
                            entry['version'] == image[0] and
                            entry.get('base', None) == image[1]]

                    entry = copy.deepcopy(orig[0])

                    # Remove the current version tarball: the alias needs
                    # its own tarball naming the alias channel, not the
                    # target's.
                    version_detail = None
                    version_index = len(entry['files'])
                    for fentry in entry['files']:
                        if fentry['path'].endswith("version-%s.tar.xz" %
                                                   entry['version']):

                            version_path = "%s/%s" % (
                                self.config.publish_path, fentry['path'])

                            # Recover version_detail from the sidecar
                            # metadata, when present.
                            if os.path.exists(
                                    version_path.replace(".tar.xz",
                                                         ".json")):
                                with open(
                                        version_path.replace(
                                            ".tar.xz", ".json")) as fd:
                                    metadata = json.loads(fd.read())
                                    if "channel.ini" in metadata:
                                        version_detail = \
                                            metadata['channel.ini'].get(
                                                "version_detail", None)

                            version_index = fentry['order']
                            entry['files'].remove(fentry)
                            break

                    # Generate a new one
                    path = os.path.join(device.path,
                                        "version-%s.tar.xz" %
                                        entry['version'])
                    abspath, relpath = tools.expand_path(path,
                                                         device.pub_path)
                    if not os.path.exists(abspath):
                        tools.generate_version_tarball(
                            self.config, channel_name, device_name,
                            str(entry['version']),
                            abspath.replace(".xz", ""),
                            version_detail=version_detail,
                            channel_target=target_name)
                        tools.xz_compress(abspath.replace(".xz", ""))
                        os.remove(abspath.replace(".xz", ""))
                        gpg.sign_file(self.config, "image-signing",
                                      abspath)

                    with open(abspath, "rb") as fd:
                        checksum = sha256(fd.read()).hexdigest()

                    # Generate the new file entry, reusing the original
                    # tarball's position in the file list.
                    version = {}
                    version['order'] = version_index
                    version['path'] = "/%s" % "/".join(
                        relpath.split(os.sep))
                    version['signature'] = "/%s.asc" % "/".join(
                        relpath.split(os.sep))
                    version['checksum'] = checksum
                    version['size'] = int(os.stat(abspath).st_size)

                    # And add it
                    entry['files'].append(version)
                    index['images'].append(entry)

            # Sync phased-percentage of the newest version only (older
            # versions can't have their percentage changed).
            versions = sorted({entry[0] for entry in target_images})
            if versions:
                device.set_phased_percentage(
                    versions[-1],
                    target_device.get_phased_percentage(versions[-1]))

        return True
4051 | 745 | |||
4052 | 746 | def sync_aliases(self, channel_name): | ||
4053 | 747 | """ | ||
4054 | 748 | Update any channel that's an alias of the current one. | ||
4055 | 749 | """ | ||
4056 | 750 | |||
4057 | 751 | with channels_json(self.config, self.indexpath) as channels: | ||
4058 | 752 | if channel_name not in channels: | ||
4059 | 753 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
4060 | 754 | |||
4061 | 755 | alias_channels = [name | ||
4062 | 756 | for name, channel | ||
4063 | 757 | in self.list_channels().items() | ||
4064 | 758 | if channel.get("alias", None) == channel_name | ||
4065 | 759 | and name != channel_name] | ||
4066 | 760 | |||
4067 | 761 | for alias_name in alias_channels: | ||
4068 | 762 | self.sync_alias(alias_name) | ||
4069 | 763 | |||
4070 | 764 | return True | ||
4071 | 765 | |||
4072 | 766 | def sync_redirects(self, channel_name): | ||
4073 | 767 | """ | ||
4074 | 768 | Update any channel that's a direct of the current one. | ||
4075 | 769 | """ | ||
4076 | 770 | |||
4077 | 771 | with channels_json(self.config, self.indexpath) as channels: | ||
4078 | 772 | if channel_name not in channels: | ||
4079 | 773 | raise KeyError("Couldn't find channel: %s" % channel_name) | ||
4080 | 774 | |||
4081 | 775 | redirect_channels = [name | ||
4082 | 776 | for name, channel | ||
4083 | 777 | in self.list_channels().items() | ||
4084 | 778 | if channel.get("redirect", None) == channel_name] | ||
4085 | 779 | |||
4086 | 780 | for redirect_name in redirect_channels: | ||
4087 | 781 | self.remove_channel(redirect_name) | ||
4088 | 782 | self.create_channel_redirect(redirect_name, channel_name) | ||
4089 | 783 | |||
4090 | 784 | return True | ||
4091 | 785 | |||
4092 | 786 | |||
class Device:
    """
    Manage a single device's index.json: the published images (full and
    delta), their files, descriptions and phased-percentage.
    """

    def __init__(self, config, path):
        self.config = config
        self.pub_path = self.config.publish_path
        # Absolute path of the device directory inside the published tree.
        self.path = path
        self.indexpath = os.path.join(path, "index.json")

    def create_image(self, entry_type, version, description, paths,
                     base=None, bootme=False, minversion=None):
        """
        Add a new image to the index.

        entry_type is "full" or "delta"; a delta requires a base version
        and may not carry a minversion.  Every file in paths must already
        exist along with its detached .asc signature.
        """

        if len(paths) == 0:
            raise Exception("No file passed for this image.")

        files = []
        count = 0

        with index_json(self.config, self.indexpath, True) as index:
            for path in paths:
                abspath, relpath = tools.expand_path(path, self.pub_path)

                if not os.path.exists(abspath):
                    raise Exception("Specified file doesn't exists: %s"
                                    % abspath)

                if not os.path.exists("%s.asc" % abspath):
                    raise Exception("The GPG file signature doesn't exists: "
                                    "%s.asc" % abspath)

                with open(abspath, "rb") as fd:
                    checksum = sha256(fd.read()).hexdigest()

                files.append({'order': count,
                              'path': "/%s" % "/".join(relpath.split(os.sep)),
                              'checksum': checksum,
                              'signature': "/%s.asc" % "/".join(
                                  relpath.split(os.sep)),
                              'size': int(os.stat(abspath).st_size)})

                count += 1

            image = {}

            if entry_type == "delta":
                if not base:
                    raise KeyError("Missing base version for delta image.")
                image['base'] = int(base)
            elif base:
                raise KeyError("Base version set for full image.")

            if bootme:
                image['bootme'] = bootme

            if minversion:
                if entry_type == "delta":
                    raise KeyError("Minimum version set for delta image.")
                image['minversion'] = minversion

            image['description'] = description
            image['files'] = files
            image['type'] = entry_type
            image['version'] = version
            index['images'].append(image)

        return True

    def expire_images(self, max_images):
        """
        Expire images keeping the last <max_images> full images and
        their deltas.  Also remove any delta that has an expired image
        as its base.
        """

        full_images = sorted([image for image in self.list_images()
                              if image['type'] == "full"],
                             key=lambda image: image['version'])

        to_remove = len(full_images) - max_images
        if to_remove <= 0:
            return True

        # Oldest fulls go first; collect their versions so dependent
        # deltas are dropped too.
        full_remove = full_images[:to_remove]
        remove_version = [image['version'] for image in full_remove]

        for image in self.list_images():
            if image['type'] == "full":
                if image['version'] in remove_version:
                    self.remove_image(image['type'], image['version'])
            else:
                # A delta dies when either its own version or its base
                # version has been expired.
                if (image['version'] in remove_version
                        or image['base'] in remove_version):
                    self.remove_image(image['type'], image['version'],
                                      image['base'])

        return True

    def get_image(self, entry_type, version, base=None):
        """
        Look for an image and return a dict representation of it.

        Raises ValueError on invalid arguments and IndexError when no
        single match exists.
        """

        if entry_type not in ("full", "delta"):
            raise ValueError("Invalid image type: %s" % entry_type)

        if entry_type == "delta" and not base:
            raise ValueError("Missing base version for delta image.")

        with index_json(self.config, self.indexpath) as index:
            match = []
            for image in index['images']:
                if (image['type'] == entry_type and image['version'] == version
                        and (image['type'] == "full" or
                             image['base'] == base)):
                    match.append(image)

            if len(match) != 1:
                raise IndexError("Couldn't find a match.")

            return match[0]

    def get_phased_percentage(self, version):
        """
        Returns the phasing percentage for a given version.

        Images without an explicit phased-percentage are fully phased
        (100).  Raises IndexError for unknown versions.
        """

        for entry in self.list_images():
            if entry['version'] == version:
                if "phased-percentage" in entry:
                    return entry['phased-percentage']
                else:
                    return 100
        else:
            # for/else: only reached when no entry matched the version.
            raise IndexError("Invalid version number: %s" % version)

    def list_images(self):
        """
        Returns a list of all existing images, each image is a dict.
        This is simply a decoded version of the image array in index.json
        """

        with index_json(self.config, self.indexpath) as index:
            return index['images']

    def remove_image(self, entry_type, version, base=None):
        """
        Remove an image.
        """

        image = self.get_image(entry_type, version, base)
        with index_json(self.config, self.indexpath, True) as index:
            index['images'].remove(image)

        return True

    def set_description(self, entry_type, version, description,
                        translations=None, base=None):
        """
        Set or update an image description.

        translations maps language codes to translated descriptions.
        (The default used to be a shared mutable dict ``{}`` — the classic
        Python mutable-default pitfall; ``None`` now stands in for "no
        translations", which is backward-compatible.)
        """

        if translations is None:
            translations = {}

        if translations and not isinstance(translations, dict):
            raise TypeError("translations must be a dict.")

        image = self.get_image(entry_type, version, base)

        with index_json(self.config, self.indexpath, True) as index:
            for entry in index['images']:
                if entry != image:
                    continue

                entry['description'] = description
                for langid, value in translations.items():
                    entry['description_%s' % langid] = value

                break

        return True

    def set_phased_percentage(self, version, percentage):
        """
        Set the phasing percentage on an image version.

        Only the latest version may be phased; 100 removes the field
        (fully phased is the implicit default).
        """

        if not isinstance(percentage, int):
            raise TypeError("percentage must be an integer.")

        if percentage < 0 or percentage > 100:
            raise ValueError("percentage must be >= 0 and <= 100.")

        with index_json(self.config, self.indexpath, True) as index:
            versions = sorted({entry['version'] for entry in index['images']})

            last_version = None
            if versions:
                last_version = versions[-1]

            if version not in versions:
                raise IndexError("Version doesn't exist: %s" % version)

            if version != last_version:
                raise Exception("Phased percentage can only be set on the "
                                "latest image")

            for entry in index['images']:
                if entry['version'] == version:
                    if percentage == 100 and "phased-percentage" in entry:
                        entry.pop("phased-percentage")
                    elif percentage != 100:
                        entry['phased-percentage'] = percentage

        return True
4306 | 0 | 1000 | ||
4307 | === added directory 'secret' | |||
4308 | === added directory 'secret/gpg' | |||
4309 | === added directory 'secret/gpg/keyrings' | |||
4310 | === added directory 'secret/gpg/keys' | |||
4311 | === added directory 'secret/ssh' | |||
4312 | === added directory 'state' | |||
4313 | === added directory 'tests' | |||
4314 | === added file 'tests/generate-keys' | |||
4315 | --- tests/generate-keys 1970-01-01 00:00:00 +0000 | |||
4316 | +++ tests/generate-keys 2014-10-10 11:11:17 +0000 | |||
4317 | @@ -0,0 +1,52 @@ | |||
4318 | 1 | #!/usr/bin/python | ||
4319 | 2 | # -*- coding: utf-8 -*- | ||
4320 | 3 | |||
4321 | 4 | # Copyright (C) 2013 Canonical Ltd. | ||
4322 | 5 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
4323 | 6 | |||
4324 | 7 | # This program is free software: you can redistribute it and/or modify | ||
4325 | 8 | # it under the terms of the GNU General Public License as published by | ||
4326 | 9 | # the Free Software Foundation; version 3 of the License. | ||
4327 | 10 | # | ||
4328 | 11 | # This program is distributed in the hope that it will be useful, | ||
4329 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4330 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4331 | 14 | # GNU General Public License for more details. | ||
4332 | 15 | # | ||
4333 | 16 | # You should have received a copy of the GNU General Public License | ||
4334 | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
4335 | 18 | |||
import os
import shutil
import sys

sys.path.insert(0, 'lib')

from systemimage import gpg

# Directory that must already exist; one sub-directory is (re)created
# per test key.
TARGET_DIR = "tests/keys/"

# (name, uid description, uid email, expiry) for every testing key.
KEYS = (("archive-master", "[TESTING] Ubuntu Archive Master Signing Key",
         "ftpmaster@ubuntu.com", 0),
        ("image-master", "[TESTING] Ubuntu System Image Master Signing Key",
         "system-image@ubuntu.com", 0),
        ("image-signing", "[TESTING] Ubuntu System Image Signing Key (YYYY)",
         "system-image@ubuntu.com", "2y"),
        ("device-signing", "[TESTING] Random OEM Signing Key (YYYY)",
         "system-image@ubuntu.com", "2y"))


def _regenerate(name, description, email, expiry):
    """Wipe and recreate the directory for one key, then generate it."""
    key_dir = "%s/%s/" % (TARGET_DIR, name)
    if os.path.exists(key_dir):
        shutil.rmtree(key_dir)
    os.makedirs(key_dir)
    return gpg.generate_signing_key(key_dir, description, email, expiry)


if not os.path.exists(TARGET_DIR):
    raise Exception("Missing tests/keys directory")

for key_entry in KEYS:
    uid = _regenerate(*key_entry)
    print("%s <%s>" % (uid.name, uid.email))

# All done, let's mark it as done
open("tests/keys/generated", "w+").close()
4370 | 0 | 53 | ||
4371 | === added directory 'tests/keys' | |||
4372 | === added file 'tests/run' | |||
4373 | --- tests/run 1970-01-01 00:00:00 +0000 | |||
4374 | +++ tests/run 2014-10-10 11:11:17 +0000 | |||
4375 | @@ -0,0 +1,60 @@ | |||
4376 | 1 | #!/usr/bin/python | ||
4377 | 2 | # -*- coding: utf-8 -*- | ||
4378 | 3 | |||
4379 | 4 | # Copyright (C) 2013 Canonical Ltd. | ||
4380 | 5 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
4381 | 6 | |||
4382 | 7 | # This program is free software: you can redistribute it and/or modify | ||
4383 | 8 | # it under the terms of the GNU General Public License as published by | ||
4384 | 9 | # the Free Software Foundation; version 3 of the License. | ||
4385 | 10 | # | ||
4386 | 11 | # This program is distributed in the hope that it will be useful, | ||
4387 | 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4388 | 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4389 | 14 | # GNU General Public License for more details. | ||
4390 | 15 | # | ||
4391 | 16 | # You should have received a copy of the GNU General Public License | ||
4392 | 17 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
4393 | 18 | |||
4394 | 19 | # Dependencies: | ||
4395 | 20 | # - python2 (>= 2.7): python-gpgme, python-coverage | ||
4396 | 21 | # - python3 (>= 3.2): python3-gpgme | ||
4397 | 22 | |||
import glob
import os
import re
import shutil
import sys
import unittest

# FIX: the availability flag used to be named "coverage" and was then
# shadowed by the imported "coverage" class; that only worked because a
# class object is truthy.  Use a dedicated boolean instead.
have_coverage = False
try:
    from coverage import coverage
    cov = coverage()
    cov.start()
    have_coverage = True
except ImportError:
    print("No coverage report, make sure python-coverage is installed")

sys.path.insert(0, 'lib')

# Optional first argument: a regex used to filter test module names.
if len(sys.argv) > 1:
    test_filter = sys.argv[1]
else:
    test_filter = ''

# Collect tests/test_*.py module names (without the .py suffix).
tests = [t[:-3] for t in os.listdir('tests')
         if t.startswith('test_') and t.endswith('.py') and
         re.search(test_filter, t)]
tests.sort()
suite = unittest.TestLoader().loadTestsFromNames(tests)
res = unittest.TextTestRunner(verbosity=2).run(suite)

if have_coverage:
    if os.path.exists('tests/coverage'):
        shutil.rmtree('tests/coverage')
    cov.stop()
    cov.html_report(include=glob.glob("lib/systemimage/*.py"),
                    directory='tests/coverage')
    print("")
    cov.report(include=glob.glob("lib/systemimage/*.py"))

# FIX: previously the result was ignored and the runner always exited 0;
# propagate failures so CI can detect them.
sys.exit(0 if res.wasSuccessful() else 1)
4436 | 0 | 61 | ||
4437 | === added file 'tests/test_config.py' | |||
4438 | --- tests/test_config.py 1970-01-01 00:00:00 +0000 | |||
4439 | +++ tests/test_config.py 2014-10-10 11:11:17 +0000 | |||
4440 | @@ -0,0 +1,281 @@ | |||
4441 | 1 | # -*- coding: utf-8 -*- | ||
4442 | 2 | |||
4443 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
4444 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
4445 | 5 | |||
4446 | 6 | # This program is free software: you can redistribute it and/or modify | ||
4447 | 7 | # it under the terms of the GNU General Public License as published by | ||
4448 | 8 | # the Free Software Foundation; version 3 of the License. | ||
4449 | 9 | # | ||
4450 | 10 | # This program is distributed in the hope that it will be useful, | ||
4451 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4452 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4453 | 13 | # GNU General Public License for more details. | ||
4454 | 14 | # | ||
4455 | 15 | # You should have received a copy of the GNU General Public License | ||
4456 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
4457 | 17 | |||
4458 | 18 | import os | ||
4459 | 19 | import shutil | ||
4460 | 20 | import tempfile | ||
4461 | 21 | import unittest | ||
4462 | 22 | |||
4463 | 23 | from systemimage import config | ||
4464 | 24 | from systemimage import tools | ||
4465 | 25 | |||
4466 | 26 | try: | ||
4467 | 27 | from unittest import mock | ||
4468 | 28 | except ImportError: | ||
4469 | 29 | import mock | ||
4470 | 30 | |||
4471 | 31 | |||
class ConfigTests(unittest.TestCase):
    """Tests for systemimage.config parsing and validation."""

    def setUp(self):
        # Fresh scratch directory for every test; removed in tearDown.
        temp_directory = tempfile.mkdtemp()
        self.temp_directory = temp_directory

    def tearDown(self):
        shutil.rmtree(self.temp_directory)

    @mock.patch("subprocess.call")
    def test_config(self, mock_call):
        """Exercise Config() against a series of good and bad config files.

        subprocess.call is mocked so sync_mirrors() never actually runs
        ssh; we only assert on the argument lists it would have used.
        """
        # Good complete config
        config_path = os.path.join(self.temp_directory, "config")
        key_path = os.path.join(self.temp_directory, "key")

        with open(config_path, "w+") as fd:
            fd.write("""[global]
base_path = %s
mirrors = a, b

[mirror_default]
ssh_user = user
ssh_key = key
ssh_port = 22
ssh_command = command

[mirror_a]
ssh_host = hosta

[mirror_b]
ssh_host = hostb
""" % self.temp_directory)

        conf = config.Config(config_path)

        # Test ssh sync
        # NOTE(review): key_path is expected here, so Config presumably
        # resolves the relative ssh_key against base_path — confirm in
        # lib/systemimage/config.py.
        tools.sync_mirrors(conf)
        expected_calls = [((['ssh', '-i', key_path, '-l', 'user',
                             '-p', '22', 'hosta', 'command'],), {}),
                          ((['ssh', '-i', key_path, '-l', 'user',
                             '-p', '22', 'hostb', 'command'],), {})]
        self.assertEquals(mock_call.call_args_list, expected_calls)

        # Invalid config: unparseable content yields {} from parse_config
        # and an exception from Config (which requires a valid file).
        invalid_config_path = os.path.join(self.temp_directory,
                                           "invalid_config")
        with open(invalid_config_path, "w+") as fd:
            fd.write("""invalid""")

        self.assertEquals(config.parse_config(invalid_config_path), {})

        self.assertRaises(
            Exception, config.Config, os.path.join(self.temp_directory,
                                                   "invalid"))

        # Test loading config from default location
        config_file = os.path.join(os.path.dirname(config.__file__),
                                   "../../etc/config")

        # Behaviour depends on whether the checkout ships etc/config, so
        # both branches are tolerated here.
        old_pwd = os.getcwd()
        os.chdir(self.temp_directory)
        if not os.path.exists(config_file):
            self.assertRaises(Exception, config.Config)
        else:
            self.assertTrue(config.Config())
        os.chdir(old_pwd)

        # Empty config: base_path falls back to the current directory.
        empty_config_path = os.path.join(self.temp_directory,
                                         "empty_config")
        with open(empty_config_path, "w+") as fd:
            fd.write("")

        conf = config.Config(empty_config_path)
        self.assertEquals(conf.base_path, os.getcwd())

        # Single mirror config
        single_mirror_config_path = os.path.join(self.temp_directory,
                                                 "single_mirror_config")
        with open(single_mirror_config_path, "w+") as fd:
            fd.write("""[global]
mirrors = a

[mirror_default]
ssh_user = user
ssh_key = key
ssh_port = 22
ssh_command = command

[mirror_a]
ssh_host = host
""")

        conf = config.Config(single_mirror_config_path)
        self.assertEquals(conf.mirrors['a'].ssh_command, "command")

        # Missing mirror_default: mirrors without defaults must raise.
        missing_default_config_path = os.path.join(self.temp_directory,
                                                   "missing_default_config")
        with open(missing_default_config_path, "w+") as fd:
            fd.write("""[global]
mirrors = a

[mirror_a]
ssh_host = host
""")

        self.assertRaises(KeyError, config.Config, missing_default_config_path)

        # Missing mirror key (ssh_key absent from mirror_default)
        missing_key_config_path = os.path.join(self.temp_directory,
                                               "missing_key_config")
        with open(missing_key_config_path, "w+") as fd:
            fd.write("""[global]
mirrors = a

[mirror_default]
ssh_user = user
ssh_port = 22
ssh_command = command

[mirror_a]
ssh_host = host
""")

        self.assertRaises(KeyError, config.Config, missing_key_config_path)

        # Missing mirror (mirror "a" listed but has no section)
        missing_mirror_config_path = os.path.join(self.temp_directory,
                                                  "missing_mirror_config")
        with open(missing_mirror_config_path, "w+") as fd:
            fd.write("""[global]
mirrors = a

[mirror_default]
ssh_user = user
ssh_port = 22
ssh_command = command
ssh_key = key
""")

        self.assertRaises(KeyError, config.Config, missing_mirror_config_path)

        # Missing ssh_host in the per-mirror section
        missing_host_config_path = os.path.join(self.temp_directory,
                                                "missing_host_config")
        with open(missing_host_config_path, "w+") as fd:
            fd.write("""[global]
mirrors = a

[mirror_default]
ssh_user = user
ssh_port = 22
ssh_command = command
ssh_key = key

[mirror_a]
ssh_user = other-user
""")

        self.assertRaises(KeyError, config.Config, missing_host_config_path)

        # Test with env path
        # NOTE(review): SYSTEM_IMAGE_ROOT is left set after this test and
        # may leak into later tests in the same process — confirm intended.
        test_path = os.path.join(self.temp_directory, "a", "b")
        os.makedirs(os.path.join(test_path, "etc"))
        with open(os.path.join(test_path, "etc", "config"), "w+") as fd:
            fd.write("[global]\nbase_path = a/b/c")
        os.environ['SYSTEM_IMAGE_ROOT'] = test_path
        test_config = config.Config()
        self.assertEquals(test_config.base_path, "a/b/c")

        # Test the channels config
        # # Multiple channels
        channel_config_path = os.path.join(self.temp_directory,
                                           "channel_config")
        with open(channel_config_path, "w+") as fd:
            fd.write("""[global]
channels = a, b

[channel_a]
type = manual
fullcount = 10

[channel_b]
type = auto
versionbase = 5
deltabase = a, b
files = a, b
file_a = test;arg1;arg2
file_b = test;arg3;arg4
""")

        conf = config.Config(channel_config_path)
        # file_<name> entries are split on ";" into generator + arguments.
        self.assertEquals(
            conf.channels['b'].files,
            [{'name': 'a', 'generator': 'test',
              'arguments': ['arg1', 'arg2']},
             {'name': 'b', 'generator': 'test',
              'arguments': ['arg3', 'arg4']}])

        # Defaults: versionbase=1, deltabase=[channel name], fullcount=0.
        self.assertEquals(conf.channels['a'].fullcount, 10)
        self.assertEquals(conf.channels['a'].versionbase, 1)
        self.assertEquals(conf.channels['a'].deltabase, ['a'])

        self.assertEquals(conf.channels['b'].fullcount, 0)
        self.assertEquals(conf.channels['b'].versionbase, 5)
        self.assertEquals(conf.channels['b'].deltabase, ["a", "b"])

        # # Single channel
        single_channel_config_path = os.path.join(self.temp_directory,
                                                  "single_channel_config")
        with open(single_channel_config_path, "w+") as fd:
            fd.write("""[global]
channels = a

[channel_a]
deltabase = a
versionbase = 1
files = a
file_a = test;arg1;arg2
""")

        conf = config.Config(single_channel_config_path)
        self.assertEquals(
            conf.channels['a'].files,
            [{'name': 'a', 'generator': 'test',
              'arguments': ['arg1', 'arg2']}])

        # # Invalid channel (listed but no [channel_a] section)
        invalid_channel_config_path = os.path.join(self.temp_directory,
                                                   "invalid_channel_config")
        with open(invalid_channel_config_path, "w+") as fd:
            fd.write("""[global]
channels = a
""")

        self.assertRaises(KeyError, config.Config, invalid_channel_config_path)

        # # Invalid file (files lists "a" but file_a is missing)
        invalid_file_channel_config_path = os.path.join(
            self.temp_directory, "invalid_file_channel_config")
        with open(invalid_file_channel_config_path, "w+") as fd:
            fd.write("""[global]
channels = a

[channel_a]
files = a
""")

        self.assertRaises(KeyError, config.Config,
                          invalid_file_channel_config_path)
4722 | 0 | 282 | ||
4723 | === added file 'tests/test_diff.py' | |||
4724 | --- tests/test_diff.py 1970-01-01 00:00:00 +0000 | |||
4725 | +++ tests/test_diff.py 2014-10-10 11:11:17 +0000 | |||
4726 | @@ -0,0 +1,265 @@ | |||
4727 | 1 | # -*- coding: utf-8 -*- | ||
4728 | 2 | |||
4729 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
4730 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> | ||
4731 | 5 | |||
4732 | 6 | # This program is free software: you can redistribute it and/or modify | ||
4733 | 7 | # it under the terms of the GNU General Public License as published by | ||
4734 | 8 | # the Free Software Foundation; version 3 of the License. | ||
4735 | 9 | # | ||
4736 | 10 | # This program is distributed in the hope that it will be useful, | ||
4737 | 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
4738 | 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
4739 | 13 | # GNU General Public License for more details. | ||
4740 | 14 | # | ||
4741 | 15 | # You should have received a copy of the GNU General Public License | ||
4742 | 16 | # along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
4743 | 17 | |||
4744 | 18 | import shutil | ||
4745 | 19 | import sys | ||
4746 | 20 | import tarfile | ||
4747 | 21 | import tempfile | ||
4748 | 22 | import unittest | ||
4749 | 23 | |||
4750 | 24 | from io import BytesIO, StringIO | ||
4751 | 25 | from systemimage.diff import ImageDiff, compare_files | ||
4752 | 26 | |||
4753 | 27 | |||
class DiffTests(unittest.TestCase):
    """Tests for systemimage.diff.ImageDiff using two synthetic tarballs."""

    def setUp(self):
        # Build a source and a target tarball covering every member kind
        # the differ must handle: plain files, directories, symlinks,
        # hard links, mtime-only changes, type changes and dangling links.
        temp_directory = tempfile.mkdtemp()

        source_tarball_path = "%s/source.tar" % temp_directory
        target_tarball_path = "%s/target.tar" % temp_directory

        source_tarball = tarfile.open(source_tarball_path, "w")
        target_tarball = tarfile.open(target_tarball_path, "w")

        # Standard file
        a = tarfile.TarInfo()
        a.name = "a"
        a.size = 4

        # Standard file
        b = tarfile.TarInfo()
        b.name = "b"
        b.size = 4

        # Standard directory
        c_dir = tarfile.TarInfo()
        c_dir.name = "c"
        c_dir.type = tarfile.DIRTYPE
        c_dir.mode = 0o755

        # Standard file
        c = tarfile.TarInfo()
        c.name = "c/c"
        c.size = 4

        # Standard file (content change between source and target)
        d_source = tarfile.TarInfo()
        d_source.name = "c/d"
        d_source.size = 8
        d_source.mtime = 1000

        # Standard file
        d_target = tarfile.TarInfo()
        d_target.name = "c/d"
        d_target.size = 8
        d_target.mtime = 1234

        # Symlink
        e = tarfile.TarInfo()
        e.name = "e"
        e.type = tarfile.SYMTYPE
        e.linkname = "a"

        # Hard link
        f = tarfile.TarInfo()
        f.name = "f"
        f.type = tarfile.LNKTYPE
        f.linkname = "a"

        # Standard file (mtime-only change: must NOT appear in the diff)
        g_source = tarfile.TarInfo()
        g_source.name = "c/g"
        g_source.size = 4
        g_source.mtime = 1000

        # Standard file
        g_target = tarfile.TarInfo()
        g_target.name = "c/g"
        g_target.size = 4
        g_target.mtime = 1001

        # Hard link (mtime-only change, same idea as c/g)
        h_source = tarfile.TarInfo()
        h_source.name = "c/h"
        h_source.type = tarfile.LNKTYPE
        h_source.linkname = "d"
        h_source.mtime = 1000

        # Hard link
        h_target = tarfile.TarInfo()
        h_target.name = "c/h"
        h_target.type = tarfile.LNKTYPE
        h_target.linkname = "d"
        h_target.mtime = 1001

        # Hard link
        i = tarfile.TarInfo()
        i.name = "c/a_i"
        i.type = tarfile.LNKTYPE
        i.linkname = "c"

        # Dangling symlink
        j = tarfile.TarInfo()
        j.name = "c/j"
        j.type = tarfile.SYMTYPE
        j.linkname = "j_non-existent"

        # Standard directory
        k_dir = tarfile.TarInfo()
        k_dir.name = "dir"
        k_dir.type = tarfile.DIRTYPE
        k_dir.mode = 0o755

        # Dangling symlink ("dir" changes type: directory -> symlink)
        l = tarfile.TarInfo()
        l.name = "dir"
        l.type = tarfile.SYMTYPE
        l.linkname = "l_non-existent"

        # Standard file ("m"/"n" swap roles of file and hard link)
        m_source = tarfile.TarInfo()
        m_source.name = "m"
        m_source.size = 4

        # Hard link
        m_target = tarfile.TarInfo()
        m_target.name = "m"
        m_target.type = tarfile.LNKTYPE
        m_target.linkname = "n"

        # Hard link
        n_source = tarfile.TarInfo()
        n_source.name = "n"
        n_source.type = tarfile.LNKTYPE
        n_source.linkname = "m"

        # Standard file
        n_target = tarfile.TarInfo()
        n_target.name = "n"
        n_target.size = 4

        # Hard link (link entry appears before its target in the archive)
        o_source = tarfile.TarInfo()
        o_source.name = "system/o.1"
        o_source.type = tarfile.LNKTYPE
        o_source.linkname = "system/o"

        # Standard file
        o_target = tarfile.TarInfo()
        o_target.name = "system/o"
        o_target.size = 4

        # NOTE(review): "a" is added three times in a row here — looks
        # unintentional (duplicate tar members); confirm whether the
        # differ is meant to cope with duplicates.
        source_tarball.addfile(a, BytesIO(b"test"))
        source_tarball.addfile(a, BytesIO(b"test"))
        source_tarball.addfile(a, BytesIO(b"test"))
        source_tarball.addfile(b, BytesIO(b"test"))
        source_tarball.addfile(c_dir)
        source_tarball.addfile(d_source, BytesIO(b"test-abc"))
        source_tarball.addfile(g_source, BytesIO(b"test"))
        source_tarball.addfile(h_source, BytesIO(b"test"))
        source_tarball.addfile(k_dir)
        source_tarball.addfile(m_source, BytesIO(b"test"))
        source_tarball.addfile(n_source)

        target_tarball.addfile(a, BytesIO(b"test"))
        target_tarball.addfile(c_dir)
        target_tarball.addfile(c, BytesIO(b"test"))
        target_tarball.addfile(d_target, BytesIO(b"test-def"))
        target_tarball.addfile(e)
        target_tarball.addfile(f)
        target_tarball.addfile(g_target, BytesIO(b"test"))
        target_tarball.addfile(h_target, BytesIO(b"test"))
        target_tarball.addfile(i)
        target_tarball.addfile(j)
        target_tarball.addfile(l)
        target_tarball.addfile(n_target, BytesIO(b"test"))
        target_tarball.addfile(m_target)
        target_tarball.addfile(o_source)
        target_tarball.addfile(o_target)

        source_tarball.close()
        target_tarball.close()

        self.imagediff = ImageDiff(source_tarball_path, target_tarball_path)
        self.source_tarball_path = source_tarball_path
        self.target_tarball_path = target_tarball_path
        self.temp_directory = temp_directory

    def tearDown(self):
        shutil.rmtree(self.temp_directory)

    def test_content(self):
        # scan_content("source"/"target") returns (set, dict) keyed by path.
        content_set, content_dict = self.imagediff.scan_content("source")
        self.assertEquals(sorted(content_dict.keys()),
                          ['a', 'b', 'c', 'c/d', 'c/g', 'c/h', 'dir', 'm',
                           'n'])

        content_set, content_dict = self.imagediff.scan_content("target")
        self.assertEquals(sorted(content_dict.keys()),
                          ['a', 'c', 'c/a_i', 'c/c', 'c/d', 'c/g', 'c/h',
                           'c/j', 'dir', 'e', 'f', 'm', 'n', 'system/o',
                           'system/o.1'])

    def test_content_invalid_image(self):
        # Only "source" and "target" are valid image names.
        self.assertRaises(KeyError, self.imagediff.scan_content, "invalid")

    def test_compare_files(self):
        # None/None compares equal; None vs content does not.
        self.assertEquals(compare_files(None, None), True)
        self.assertEquals(compare_files(None, BytesIO(b"abc")), False)

    def test_compare_image(self):
        diff_set = self.imagediff.compare_images()
        self.assertTrue(("c/a_i", "add") in diff_set)

    def test_print_changes(self):
        # Redirect stdout
        old_stdout = sys.stdout

        # FIXME: Would be best to have something that works with both version
        if sys.version[0] == "3":
            sys.stdout = StringIO()
        else:
            sys.stdout = BytesIO()

        self.imagediff.print_changes()

        # Unredirect stdout
        output = sys.stdout.getvalue()
        sys.stdout = old_stdout

        # Note: mtime-only changes (c/g, c/h) are absent from the output.
        self.assertEquals(output, """ - b (del)
 - c/a_i (add)
 - c/c (add)
 - c/d (mod)
 - c/j (add)
 - dir (mod)
 - e (add)
 - f (add)
 - system/o (add)
 - system/o.1 (add)
""")

    def test_generate_tarball(self):
        output_tarball = "%s/output.tar" % self.temp_directory

        self.imagediff.generate_diff_tarball(output_tarball)
        tarball = tarfile.open(output_tarball, "r")

        # The "removed" list comes first, then added/modified members.
        files_list = [entry.name for entry in tarball]
        self.assertEquals(files_list, ['removed', 'c/c', 'c/a_i', 'c/d', 'c/j',
                                       'dir', 'e', 'f', 'system/o',
                                       'system/o.1'])
4992 | 0 | 266 | ||
4993 | === added file 'tests/test_generators.py' | |||
4994 | --- tests/test_generators.py 1970-01-01 00:00:00 +0000 | |||
4995 | +++ tests/test_generators.py 2014-10-10 11:11:17 +0000 | |||
4996 | @@ -0,0 +1,1039 @@ | |||
4997 | 1 | # -*- coding: utf-8 -*- | ||
4998 | 2 | |||
4999 | 3 | # Copyright (C) 2013 Canonical Ltd. | ||
5000 | 4 | # Author: Stéphane Graber <stgraber@ubuntu.com> |