Merge lp:~james-w/pkgme-service/fab-deploy-tasks into lp:pkgme-service
- fab-deploy-tasks
- Merge into trunk
Proposed by
James Westby
Status: | Superseded |
---|---|
Proposed branch: | lp:~james-w/pkgme-service/fab-deploy-tasks |
Merge into: | lp:pkgme-service |
Diff against target: |
860 lines (+806/-0) 10 files modified
dev_config/apply (+48/-0) dev_config/manifests/pkgme_service.pp (+167/-0) dev_config/templates/celeryd.default.erb (+16/-0) dev_config/templates/celeryd.initscript.erb (+247/-0) dev_config/templates/django.wsgi.erb (+19/-0) dev_config/templates/httpd.conf.erb (+12/-0) dev_config/templates/production_credentials.cfg.erb (+27/-0) dev_config/templates/production_paths.py.erb (+13/-0) fabtasks/__init__.py (+1/-0) fabtasks/deploy.py (+256/-0) |
To merge this branch: | bzr merge lp:~james-w/pkgme-service/fab-deploy-tasks |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
canonical-consumer-applications | Pending | ||
Review via email: mp+89164@code.launchpad.net |
This proposal has been superseded by a proposal from 2012-01-18.
Commit message
Add fab tasks for dev deployment.
Description of the change
To post a comment you must log in.
- 29. By James Westby
-
Merge puppet improvements.
- 30. By James Westby
-
Merge puppet fixes.
- 31. By James Westby
-
Set the security group of the created instance so it is accessible.
- 32. By James Westby
-
Delete the security group before the instance.
- 33. By James Westby
-
Add a list_running_ec2_instances command.
Unmerged revisions
- 33. By James Westby
-
Add a list_running_ec2_instances command.
Preview Diff
[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1 | === added directory 'dev_config' |
2 | === added file 'dev_config/apply' |
3 | --- dev_config/apply 1970-01-01 00:00:00 +0000 |
4 | +++ dev_config/apply 2012-01-18 23:50:28 +0000 |
5 | @@ -0,0 +1,48 @@ |
6 | +#!/bin/bash |
7 | + |
8 | +set -e |
9 | + |
10 | +CREDS_FILE="${HOME}/credentials" |
11 | + |
12 | +has_cred() { |
13 | + CRED="$1" |
14 | + if [ -f "$CREDS_FILE" ]; then |
15 | + if grep -q "$CRED" "$CREDS_FILE"; then |
16 | + return 0 |
17 | + fi |
18 | + fi |
19 | + return 1 |
20 | +} |
21 | + |
22 | +get_cred() { |
23 | + CRED="$1" |
24 | + VALUE="$(grep "$CRED" "$CREDS_FILE" | sed -e 's/[^=]*=//')" |
25 | + echo $VALUE |
26 | +} |
27 | + |
28 | +generate_cred() { |
29 | + CRED="$1" |
30 | + VALUE="${RANDOM}" |
31 | + echo "${CRED}=${VALUE}" >> $CREDS_FILE |
32 | + get_cred $CRED |
33 | +} |
34 | + |
35 | +get_cred_or_generate() { |
36 | + CRED="$1" |
37 | + if has_cred "$CRED"; then |
38 | + get_cred "$CRED" |
39 | + else |
40 | + generate_cred "$CRED" |
41 | + fi |
42 | +} |
43 | + |
44 | +RABBIT_PASSWORD=$(get_cred_or_generate "RABBIT_PASSWORD") |
45 | +POSTGRES_PASSWORD=$(get_cred_or_generate "POSTGRES_PASSWORD") |
46 | +DJANGO_SECRET_KEY=$(get_cred_or_generate "DJANGO_SECRET_KEY") |
47 | + |
48 | +export FACTER_basedir="${HOME}/pkgme-service" |
49 | +export FACTER_django_secret_key=$DJANGO_SECRET_KEY |
50 | +export FACTER_postgres_password=$POSTGRES_PASSWORD |
51 | +export FACTER_rabbit_password=$RABBIT_PASSWORD |
52 | + |
53 | +sudo -E puppet --templatedir "${HOME}/pkgme-service/dev_config/templates" "${HOME}/pkgme-service/dev_config/manifests/pkgme_service.pp" |
54 | |
55 | === added directory 'dev_config/manifests' |
56 | === added file 'dev_config/manifests/pkgme_service.pp' |
57 | --- dev_config/manifests/pkgme_service.pp 1970-01-01 00:00:00 +0000 |
58 | +++ dev_config/manifests/pkgme_service.pp 2012-01-18 23:50:28 +0000 |
59 | @@ -0,0 +1,167 @@ |
60 | +$rabbit_user = "pkgme" |
61 | +$rabbit_vhost = "pkgme-service" |
62 | +$rabbit_host = "localhost" |
63 | +$rabbit_port = "5672" |
64 | + |
65 | +$wsgi_user = "pkgme" |
66 | +$wsgi_processes = 2 |
67 | +$wsgi_threads = 2 |
68 | + |
69 | +$postgres_user = "pkgme" |
70 | +$postgres_database = "pkgme_service" |
71 | +$postgres_host = "localhost" |
72 | +$postgres_port = "5432" |
73 | + |
74 | + |
75 | +group {"pkgme": |
76 | + ensure => 'present', |
77 | +} |
78 | + |
79 | +user {"pkgme": |
80 | + comment => 'This user was created by Puppet', |
81 | + ensure => 'present', |
82 | + gid => 'pkgme', |
83 | + require => Group["pkgme"], |
84 | +} |
85 | + |
86 | +exec { "psql -c \"create user $postgres_user with password '$::postgres_password'\"": |
87 | + path => ["/usr/bin", "/bin"], |
88 | + user => "postgres", |
89 | + unless => "psql -c \"select usename from pg_user where usename = '$postgres_user'\" | grep -q $postgres_user", |
90 | +} |
91 | + |
92 | +exec { "psql -c 'create database $postgres_database owner $postgres_user'": |
93 | + path => ["/usr/bin", "/bin"], |
94 | + user => "postgres", |
95 | + require => Exec["psql -c \"create user $postgres_user with password '$::postgres_password'\""], |
96 | + unless => "psql -c \"select datname from pg_database where datname = '$postgres_database'\" | grep -q $postgres_database", |
97 | +} |
98 | + |
99 | +exec { "psql -c 'grant all privileges on database $postgres_database to $postgres_user'": |
100 | + path => ["/usr/bin"], |
101 | + user => "postgres", |
102 | + require => Exec["psql -c 'create database $postgres_database owner $postgres_user'"], |
103 | +} |
104 | + |
105 | +file { "$::basedir/django_project/production_credentials.cfg": |
106 | + content => template("production_credentials.cfg.erb"), |
107 | + owner => "pkgme", |
108 | + group => "pkgme", |
109 | + mode => 640, |
110 | + require => [ |
111 | + Group["pkgme"], |
112 | + Exec["psql -c 'grant all privileges on database $postgres_database to $postgres_user'"], |
113 | + ], |
114 | +} |
115 | + |
116 | +file { "$::basedir/django_project/production_paths.py": |
117 | + content => template("production_paths.py.erb"), |
118 | + owner => root, |
119 | + group => "pkgme", |
120 | + mode => 644, |
121 | + require => Group["pkgme"], |
122 | +} |
123 | + |
124 | +file { "$::basedir/pkgme.log": |
125 | + owner => "pkgme", |
126 | + group => "pkgme", |
127 | + mode => 644, |
128 | + ensure => present, |
129 | +} |
130 | + |
131 | +exec { "python ./manage.py syncdb --noinput": |
132 | + cwd => "$::basedir/django_project", |
133 | + path => ["/usr/bin"], |
134 | + user => "pkgme", |
135 | + environment => "HOME=$::basedir", |
136 | + require => [ |
137 | + File["$::basedir/django_project/production_credentials.cfg"], |
138 | + File["$::basedir/django_project/production_paths.py"], |
139 | + File["$::basedir/pkgme.log"], |
140 | + ], |
141 | + logoutput => on_failure, |
142 | + loglevel => err, |
143 | +} |
144 | + |
145 | +exec { "python ./manage.py migrate": |
146 | + cwd => "$::basedir/django_project", |
147 | + path => ["/usr/bin"], |
148 | + user => "pkgme", |
149 | + environment => "HOME=$::basedir", |
150 | + require => Exec["python ./manage.py syncdb --noinput"], |
151 | +} |
152 | + |
153 | +#exec { "python ./manage.py collectstatic --noinput": |
154 | +# cwd => "$::basedir/django_project", |
155 | +# path => ["/usr/bin"], |
156 | +# require => [ |
157 | +# File["$::basedir/django_project/production_credentials.cfg"], |
158 | +# File["$::basedir/django_project/production_paths.py"], |
159 | +# ], |
160 | +#} |
161 | + |
162 | +file { "$::basedir/django_project/django.wsgi": |
163 | + content => template("django.wsgi.erb"), |
164 | + require => [ |
165 | + File["$::basedir/django_project/production_credentials.cfg"], |
166 | + File["$::basedir/django_project/production_paths.py"], |
167 | + ] |
168 | +} |
169 | + |
170 | +file { "/etc/apache2/httpd.conf": |
171 | + content => template("httpd.conf.erb"), |
172 | +} |
173 | + |
174 | +exec { "/etc/init.d/apache2 reload": |
175 | + user => root, |
176 | + require => [ |
177 | + File["/etc/apache2/httpd.conf"], |
178 | + File["$::basedir/django_project/django.wsgi"], |
179 | + ], |
180 | +} |
181 | + |
182 | +$celeryd_default_file = "/etc/default/pkgme-service-celeryd" |
183 | + |
184 | +file { "/etc/init.d/pkgme-service-celeryd": |
185 | + content => template("celeryd.initscript.erb"), |
186 | + mode => 755, |
187 | + owner => root, |
188 | +} |
189 | + |
190 | +file { $celeryd_default_file: |
191 | + content => template("celeryd.default.erb"), |
192 | + owner => root, |
193 | + mode => 644, |
194 | +} |
195 | + |
196 | +exec { "rabbitmqctl add_user $rabbit_user $::rabbit_password": |
197 | + path => ["/usr/sbin", "/bin", "/usr/bin"], |
198 | + user => root, |
199 | + unless => "rabbitmqctl list_users | grep -q \"\\<$rabbit_user\\>\"", |
200 | +} |
201 | + |
202 | +exec { "rabbitmqctl add_vhost $rabbit_vhost": |
203 | + path => ["/usr/sbin", "/bin", "/usr/bin"], |
204 | + user => root, |
205 | + unless => "rabbitmqctl list_vhosts | grep -q \"\\<$rabbit_vhost\\>\"", |
206 | +} |
207 | + |
208 | +exec { "rabbitmqctl set_permissions -p $rabbit_vhost $rabbit_user .\\* .\\* .\\*": |
209 | + path => ["/usr/sbin", "/bin", "/usr/bin"], |
210 | + user => root, |
211 | + require => [ |
212 | + Exec["rabbitmqctl add_user $rabbit_user $::rabbit_password"], |
213 | + Exec["rabbitmqctl add_vhost $rabbit_vhost"], |
214 | + ], |
215 | + unless => "rabbitmqctl list_permissions -p $rabbit_vhost | grep -q \"^\\<$rabbit_user\\>\"", |
216 | +} |
217 | + |
218 | +service { "pkgme-service-celeryd": |
219 | + ensure => running, |
220 | + hasrestart => true, |
221 | + hasstatus => true, |
222 | + subscribe => [ |
223 | + File['/etc/init.d/pkgme-service-celeryd'], |
224 | + File['/etc/default/pkgme-service-celeryd'], |
225 | + ], |
226 | +} |
227 | |
228 | === added directory 'dev_config/templates' |
229 | === added file 'dev_config/templates/celeryd.default.erb' |
230 | --- dev_config/templates/celeryd.default.erb 1970-01-01 00:00:00 +0000 |
231 | +++ dev_config/templates/celeryd.default.erb 2012-01-18 23:50:28 +0000 |
232 | @@ -0,0 +1,16 @@ |
233 | +# 0 = start on boot; 1 = don't start on boot |
234 | +NO_START="0" |
235 | + |
236 | +CELERYD_CHDIR="<%= basedir %>" |
237 | + |
238 | +CELERYD="$CELERYD_CHDIR/django_project/manage.py celeryd" |
239 | + |
240 | +CELERYD_LOG_FILE="<%= basedir %>/celery.log" |
241 | + |
242 | +CELERYD_PID_FILE="<%= basedir %>/celery.pid" |
243 | + |
244 | +CELERYD_USER="pkgme" |
245 | + |
246 | +CELERYD_GROUP="pkgme" |
247 | + |
248 | +export PYTHONPATH=<%= basedir %>:<%= basedir %>/src:<%= basedir %>/sourcecode/pkgme:<%= basedir %>/sourcecode/pkgme-binary |
249 | |
250 | === added file 'dev_config/templates/celeryd.initscript.erb' |
251 | --- dev_config/templates/celeryd.initscript.erb 1970-01-01 00:00:00 +0000 |
252 | +++ dev_config/templates/celeryd.initscript.erb 2012-01-18 23:50:28 +0000 |
253 | @@ -0,0 +1,247 @@ |
254 | +#!/bin/bash -e |
255 | +# ============================================ |
256 | +# celeryd - Starts the Celery worker daemon. |
257 | +# ============================================ |
258 | +# |
259 | +# :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status} |
260 | +# |
261 | +# :Configuration file: /etc/default/celeryd |
262 | +# |
263 | +# To configure celeryd you probably need to tell it where to chdir. |
264 | +# |
265 | +# EXAMPLE CONFIGURATION |
266 | +# ===================== |
267 | +# |
268 | +# this is an example configuration for a Python project: |
269 | +# |
270 | +# /etc/default/celeryd: |
271 | +# |
272 | +# # Where to chdir at start. |
273 | +# CELERYD_CHDIR="/opt/Myproject/" |
274 | +# |
275 | +# # Extra arguments to celeryd |
276 | +# CELERYD_OPTS="--time-limit=300" |
277 | +# |
278 | +# # Name of the celery config module.# |
279 | +# CELERY_CONFIG_MODULE="celeryconfig" |
280 | +# |
281 | +# EXAMPLE DJANGO CONFIGURATION |
282 | +# ============================ |
283 | +# |
284 | +# # Where the Django project is. |
285 | +# CELERYD_CHDIR="/opt/Project/" |
286 | +# |
287 | +# # Name of the projects settings module. |
288 | +# export DJANGO_SETTINGS_MODULE="settings" |
289 | +# |
290 | +# # Path to celeryd |
291 | +# CELERYD="/opt/Project/manage.py" |
292 | +# |
293 | +# # Extra arguments to manage.py |
294 | +# CELERYD_OPTS="celeryd" |
295 | +# |
296 | +# AVAILABLE OPTIONS |
297 | +# ================= |
298 | +# |
299 | +# * CELERYD_OPTS |
300 | +# Additional arguments to celeryd, see `celeryd --help` for a list. |
301 | +# |
302 | +# * CELERYD_CHDIR |
303 | +# Path to chdir at start. Default is to stay in the current directory. |
304 | +# |
305 | +# * CELERYD_PID_FILE |
306 | +# Full path to the pidfile. Default is /var/run/celeryd.pid. |
307 | +# |
308 | +# * CELERYD_LOG_FILE |
309 | +# Full path to the celeryd logfile. Default is /var/log/celeryd.log |
310 | +# |
311 | +# * CELERYD_LOG_LEVEL |
312 | +# Log level to use for celeryd. Default is INFO. |
313 | +# |
314 | +# * CELERYD |
315 | +# Path to the celeryd program. Default is `celeryd`. |
316 | +# You can point this to an virtualenv, or even use manage.py for django. |
317 | +# |
318 | +# * CELERYD_USER |
319 | +# User to run celeryd as. Default is current user. |
320 | +# |
321 | +# * CELERYD_GROUP |
322 | +# Group to run celeryd as. Default is current user. |
323 | +# |
324 | +# * VIRTUALENV |
325 | +# Full path to the virtualenv environment to activate. Default is none. |
326 | + |
327 | +### BEGIN INIT INFO |
328 | +# Provides: celeryd |
329 | +# Required-Start: $network $local_fs $remote_fs |
330 | +# Required-Stop: $network $local_fs $remote_fs |
331 | +# Default-Start: 2 3 4 5 |
332 | +# Default-Stop: 0 1 6 |
333 | +# Short-Description: celery task worker daemon |
334 | +### END INIT INFO |
335 | + |
336 | +set -e |
337 | + |
338 | +DEFAULT_CELERYD="/usr/bin/celeryd" |
339 | + |
340 | +# /etc/init.d/ssh: start and stop the celery task worker daemon. |
341 | + |
342 | +. /lib/lsb/init-functions |
343 | + |
344 | +DEFAULT_FILE="<%= celeryd_default_file %>" |
345 | + |
346 | +if test -f "$DEFAULT_FILE"; then |
347 | + . "$DEFAULT_FILE" |
348 | +fi |
349 | + |
350 | +if [ "$NO_START" != "0" -a "$1" != "stop" ]; then |
351 | + if [ "$VERBOSE" != "no" ]; then |
352 | + log_warning_msg "Not starting celeryd - edit $DEFAULT_FILE and change NO_START to be 0." |
353 | + fi |
354 | + exit 0 |
355 | +fi |
356 | + |
357 | + |
358 | +CELERYD_LOG_FILE=${CELERYD_LOG_FILE:-${CELERYD_LOGFILE:-"/var/log/celeryd.log"}} |
359 | +CELERYD_PID_FILE=${CELERYD_PID_FILE:-${CELERYD_PIDFILE:-"/var/run/celeryd.pid"}} |
360 | +CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-"INFO"}} |
361 | + |
362 | +CELERYD=${CELERYD:-$DEFAULT_CELERYD} |
363 | + |
364 | +export CELERY_LOADER |
365 | + |
366 | +CELERYD_OPTS="$CELERYD_OPTS -f $CELERYD_LOG_FILE -l $CELERYD_LOG_LEVEL" |
367 | + |
368 | +if [ -n "$2" ]; then |
369 | + CELERYD_OPTS="$CELERYD_OPTS $2" |
370 | +fi |
371 | + |
372 | +mkdir -p $(dirname "$CELERYD_PID_FILE") |
373 | +mkdir -p $(dirname "$CELERYD_LOG_FILE") |
374 | + |
375 | +# Extra start-stop-daemon options, like user/group. |
376 | +if [ -n "$CELERYD_USER" ]; then |
377 | + DAEMON_OPTS="$DAEMON_OPTS --chuid $CELERYD_USER" |
378 | + chown "$CELERYD_USER" $(dirname "$CELERYD_LOG_FILE") |
379 | +fi |
380 | +if [ -n "$CELERYD_GROUP" ]; then |
381 | + DAEMON_OPTS="$DAEMON_OPTS --group $CELERYD_GROUP" |
382 | +fi |
383 | + |
384 | +if [ -n "$CELERYD_CHDIR" ]; then |
385 | + DAEMON_OPTS="$DAEMON_OPTS --chdir $CELERYD_CHDIR" |
386 | +fi |
387 | + |
388 | + |
389 | +# Are we running from init? |
390 | +run_by_init() { |
391 | + ([ "$previous" ] && [ "$runlevel" ]) || [ "$runlevel" = S ] |
392 | +} |
393 | + |
394 | + |
395 | +check_dev_null() { |
396 | + if [ ! -c /dev/null ]; then |
397 | + if [ "$1" = log_end_msg ]; then |
398 | + log_end_msg 1 || true |
399 | + fi |
400 | + if ! run_by_init; then |
401 | + log_action_msg "/dev/null is not a character device!" |
402 | + fi |
403 | + exit 1 |
404 | + fi |
405 | +} |
406 | + |
407 | + |
408 | +export PATH="${PATH:+$PATH:}/usr/sbin:/sbin" |
409 | + |
410 | + |
411 | +stop_worker () { |
412 | + cmd="start-stop-daemon --stop \ |
413 | + --quiet \ |
414 | + $* \ |
415 | + --pidfile $CELERYD_PID_FILE" |
416 | + if $cmd; then |
417 | + log_end_msg 0 |
418 | + else |
419 | + log_end_msg 1 |
420 | + fi |
421 | +} |
422 | + |
423 | +start_worker () { |
424 | + cmd="start-stop-daemon --start $DAEMON_OPTS \ |
425 | + --quiet \ |
426 | + --oknodo \ |
427 | + --background \ |
428 | + --make-pidfile \ |
429 | + $* \ |
430 | + --pidfile $CELERYD_PID_FILE |
431 | + --exec $CELERYD -- $CELERYD_OPTS" |
432 | + if [ -n "$VIRTUALENV" ]; then |
433 | + source $VIRTUALENV/bin/activate |
434 | + fi |
435 | + if $cmd; then |
436 | + log_end_msg 0 |
437 | + else |
438 | + log_end_msg 1 |
439 | + fi |
440 | +} |
441 | + |
442 | + |
443 | + |
444 | +case "$1" in |
445 | + start) |
446 | + check_dev_null |
447 | + log_daemon_msg "Starting celery task worker server" "celeryd" |
448 | + start_worker |
449 | + ;; |
450 | + stop) |
451 | + log_daemon_msg "Stopping celery task worker server" "celeryd" |
452 | + stop_worker --oknodo |
453 | + ;; |
454 | + |
455 | + reload|force-reload) |
456 | + echo "Use start+stop" |
457 | + ;; |
458 | + |
459 | + restart) |
460 | + log_daemon_msg "Restarting celery task worker server" "celeryd" |
461 | + stop_worker --oknodo --retry 30 |
462 | + check_dev_null log_end_msg |
463 | + start_worker |
464 | + ;; |
465 | + |
466 | + try-restart) |
467 | + log_daemon_msg "Restarting celery task worker server" "celeryd" |
468 | + set +e |
469 | + stop_worker --retry 30 |
470 | + RET="$?" |
471 | + set -e |
472 | + case $RET in |
473 | + 0) |
474 | + # old daemon stopped |
475 | + check_dev_null log_end_msg |
476 | + start_worker |
477 | + ;; |
478 | + 1) |
479 | + # daemon not running |
480 | + log_progress_msg "(not running)" |
481 | + log_end_msg 0 |
482 | + ;; |
483 | + *) |
484 | + # failed to stop |
485 | + log_progress_msg "(failed to stop)" |
486 | + log_end_msg 1 |
487 | + ;; |
488 | + esac |
489 | + ;; |
490 | + |
491 | + status) |
492 | + status_of_proc -p $CELERYD_PID_FILE $CELERYD celeryd && exit 0 || exit $? |
493 | + ;; |
494 | + |
495 | + *) |
496 | + log_action_msg "Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status}" |
497 | + exit 1 |
498 | +esac |
499 | + |
500 | +exit 0 |
501 | |
502 | === added file 'dev_config/templates/django.wsgi.erb' |
503 | --- dev_config/templates/django.wsgi.erb 1970-01-01 00:00:00 +0000 |
504 | +++ dev_config/templates/django.wsgi.erb 2012-01-18 23:50:28 +0000 |
505 | @@ -0,0 +1,19 @@ |
506 | +import os |
507 | +import sys |
508 | + |
509 | +extra_paths = [ |
510 | + '<%= basedir %>', |
511 | + '<%= basedir %>/src', |
512 | + '<%= basedir %>/django_project', |
513 | + '<%= basedir %>/sourcecode/pkgme', |
514 | + '<%= basedir %>/sourcecode/pkgme-binary', |
515 | +] |
516 | +for path in extra_paths: |
517 | + if path not in sys.path: |
518 | + sys.path.append(path) |
519 | + |
520 | +os.environ['PKGME_LOG_DIR'] = '<%= basedir %>' |
521 | +os.environ['DJANGO_SETTINGS_MODULE'] = 'django_project.settings' |
522 | + |
523 | +import django.core.handlers.wsgi |
524 | +application = django.core.handlers.wsgi.WSGIHandler() |
525 | |
526 | === added file 'dev_config/templates/httpd.conf.erb' |
527 | --- dev_config/templates/httpd.conf.erb 1970-01-01 00:00:00 +0000 |
528 | +++ dev_config/templates/httpd.conf.erb 2012-01-18 23:50:28 +0000 |
529 | @@ -0,0 +1,12 @@ |
530 | +#Alias /assets/ /srv/<%= basedir %>/project/static/ |
531 | +# |
532 | +#<Directory /srv/<%= basedir %>/project/static> |
533 | +#Order deny,allow |
534 | +#Allow from all |
535 | +#</Directory> |
536 | + |
537 | +WSGIDaemonProcess <%= basedir %> user=<%= wsgi_user %> processes=<%= wsgi_processes %> threads=<%= wsgi_threads %> |
538 | +WSGIProcessGroup <%= basedir %> |
539 | +WSGIPassAuthorization On |
540 | + |
541 | +WSGIScriptAlias / <%= basedir %>/django_project/django.wsgi |
542 | |
543 | === added file 'dev_config/templates/production_credentials.cfg.erb' |
544 | --- dev_config/templates/production_credentials.cfg.erb 1970-01-01 00:00:00 +0000 |
545 | +++ dev_config/templates/production_credentials.cfg.erb 2012-01-18 23:50:28 +0000 |
546 | @@ -0,0 +1,27 @@ |
547 | +[django] |
548 | +databases = databases |
549 | +secret_key = <%= django_secret_key %> |
550 | + |
551 | +[databases] |
552 | +default = default_database |
553 | + |
554 | +[default_database] |
555 | +engine = django.db.backends.postgresql_psycopg2 |
556 | +name = <%= postgres_database %> |
557 | +user = <%= postgres_user %> |
558 | +password = <%= postgres_password %> |
559 | +host = <%= postgres_host %> |
560 | +port = <%= postgres_port %> |
561 | + |
562 | +[celery] |
563 | +broker_host = <%= rabbit_host %> |
564 | +broker_port = <%= rabbit_port %> |
565 | +broker_user = <%= rabbit_user %> |
566 | +broker_password = <%= rabbit_password %> |
567 | +broker_vhost = <%= rabbit_vhost %> |
568 | + |
569 | +[djpkgme] |
570 | +myapps_consumer_secret = |
571 | +myapps_consumer_key = |
572 | +myapps_token = |
573 | +myapps_token_secret = |
574 | |
575 | === added file 'dev_config/templates/production_paths.py.erb' |
576 | --- dev_config/templates/production_paths.py.erb 1970-01-01 00:00:00 +0000 |
577 | +++ dev_config/templates/production_paths.py.erb 2012-01-18 23:50:28 +0000 |
578 | @@ -0,0 +1,13 @@ |
579 | +import os |
580 | +import sys |
581 | + |
582 | +for path in [ |
583 | + '<%= basedir %>/sourcecode/pkgme-binary', |
584 | + '<%= basedir %>/sourcecode/pkgme', |
585 | + '<%= basedir %>/django_project', |
586 | + '<%= basedir %>/src', |
587 | + '<%= basedir %>', |
588 | + ]: |
589 | + sys.path.insert(0, path) |
590 | + |
591 | +os.environ['PKGME_LOG_DIR'] = '<%= basedir %>' |
592 | |
593 | === modified file 'fabtasks/__init__.py' |
594 | --- fabtasks/__init__.py 2011-08-29 23:50:07 +0000 |
595 | +++ fabtasks/__init__.py 2012-01-18 23:50:28 +0000 |
596 | @@ -1,2 +1,3 @@ |
597 | from .bootstrap import * |
598 | +from .deploy import * |
599 | from .django import * |
600 | |
601 | === added file 'fabtasks/deploy.py' |
602 | --- fabtasks/deploy.py 1970-01-01 00:00:00 +0000 |
603 | +++ fabtasks/deploy.py 2012-01-18 23:50:28 +0000 |
604 | @@ -0,0 +1,256 @@ |
605 | +from datetime import datetime |
606 | +import os |
607 | +import subprocess |
608 | +import time |
609 | + |
610 | +from fabric.api import env, run |
611 | +from fabric.utils import abort, puts |
612 | + |
613 | +from boto import ec2 |
614 | + |
615 | + |
616 | +RELEASE = 'lucid' |
617 | +ARCH = 'amd64' |
618 | +FS_TYPE = 'ebs' |
619 | +INSTANCE_TYPE = 't1.micro' |
620 | +USERNAME = 'ubuntu' |
621 | +REGION_NAME = 'us-east-1' |
622 | + |
623 | +NAME_PREFIX = "fab/pkgme-service" |
624 | + |
625 | + |
626 | +def _get_aws_credentials(): |
627 | + """Get the AWS credentials for the user from ~/.ec2/aws_id. |
628 | + |
629 | + :returns: a tuple of access key id, secret access key |
630 | + """ |
631 | + key_id = None |
632 | + secret_access_key = None |
633 | + with open(os.path.expanduser("~/.ec2/aws_id")) as f: |
634 | + for i, line in enumerate(f.readlines()): |
635 | + if i == 0: |
636 | + key_id = line.strip() |
637 | + if i == 1: |
638 | + secret_access_key = line.strip() |
639 | + if key_id is None: |
640 | + raise AssertionError("Missing key id in ~/.ec2/aws_id") |
641 | + if secret_access_key is None: |
642 | + raise AssertionError("Missing secret access key in ~/.ec2/aws_id") |
643 | + return key_id, secret_access_key |
644 | + |
645 | + |
646 | +def _get_ami_id(region_name): |
647 | + """Get the AMI to use for a particular region. |
648 | + |
649 | + This consults the ubuntu-cloudimg-query tool to find out the best |
650 | + AMI to use for a particular region. |
651 | + |
652 | + :returns: the ami id (as a string) |
653 | + """ |
654 | + proc = subprocess.Popen( |
655 | + ['ubuntu-cloudimg-query', RELEASE, ARCH, FS_TYPE, region_name], |
656 | + stdout=subprocess.PIPE) |
657 | + stdout, _ = proc.communicate() |
658 | + if proc.returncode != 0: |
659 | + raise AssertionError("calling ubuntu-cloudimg-query failed") |
660 | + return stdout.strip() |
661 | + |
662 | + |
663 | +def _new_ec2_instance(keypair): |
664 | + """Starts a new ec2 instance, giving the specified keypair access. |
665 | + |
666 | + This will use the AWS credentials from ~/.ec2/aws_id, and the |
667 | + AMI recommended by ubuntu-cloudimg-query. |
668 | + |
669 | + :returns: the boto Instance object of the launched instance. It |
670 | + will be in a state where it can be accessed over ssh. |
671 | + """ |
672 | + key_id, secret_access_key = _get_aws_credentials() |
673 | + ami_id = _get_ami_id(REGION_NAME) |
674 | + conn = ec2.connect_to_region(REGION_NAME, aws_access_key_id=key_id, |
675 | + aws_secret_access_key=secret_access_key) |
676 | + image = conn.get_image(ami_id) |
677 | + reservation = image.run(instance_type=INSTANCE_TYPE, key_name=keypair) |
678 | + instance = reservation.instances[0] |
679 | + puts("Waiting for instance %s to start..." % instance.id) |
680 | + while True: |
681 | + time.sleep(10) |
682 | + instance.update() |
683 | + if instance.state != 'pending': |
684 | + break |
685 | + if instance.state != 'running': |
686 | + raise AssertionError("Instance failed to start") |
687 | + puts("Instance started as %s" % instance.dns_name) |
688 | + puts("Waiting for ssh to come up") |
689 | + # FIXME: use something better than a sleep to determine this. |
690 | + time.sleep(30) |
691 | + now = datetime.utcnow() |
692 | + instance.add_tag("name", "%s/%s" % (NAME_PREFIX, now.isoformat())) |
693 | + return instance |
694 | + |
695 | + |
696 | +def deploy_to_ec2(branch="lp:pkgme-service", use_staging_deps=True, pkgme_branch="lp:pkgme", pkgme_binary_branch="lp:pkgme-binary", keypair='ec2-keypair'): |
697 | + """Deploy to a new ec2 instance. |
698 | + |
699 | + This command will spin up an ec2 instance, and deploy to it. |
700 | + |
701 | + To run this command you must first set up an ec2 account. |
702 | + |
703 | + http://aws.amazon.com/ec2/ |
704 | + |
705 | + Once you have that create access keys for this to use. Go to |
706 | + |
707 | + https://aws-portal.amazon.com/gp/aws/developer/account?ie=UTF8&action=access-key |
708 | + |
709 | + and create an access key. Then create a file at ~/.ec2/aws_id and |
710 | + put the "Access Key ID" on the first line, and the "Secret Access Key" on |
711 | + the second line (with nothing else on either line.) |
712 | + |
713 | + Next you will need an ec2 keypair. Go to the EC2 console at |
714 | + |
715 | + https://console.aws.amazon.com/ec2/home?region=us-east-1 |
716 | + |
717 | + click on "Key Pairs" and then "Create Key Pair", name the keypair "ec2-keypair". |
718 | + Save the resulting file as |
719 | + |
720 | + ~/.ec2/ec2-keypair.pem |
721 | + |
722 | + Now you are ready to deploy, so run |
723 | + |
724 | + fab deploy_to_ec2:keypair=ec2-keypair -i ~/.ec2/ec2-keypair.pem |
725 | + |
726 | + and wait. |
727 | + |
728 | + Note that you will be responsible for terminating the instance after |
729 | + use (see the destroy_ec2_instances command.) |
730 | + |
731 | + You can also specify the following arguments: |
732 | + |
733 | + * branch: the branch to deploy, defaults to lp:pkgme-service. To |
734 | + test some in-progress work push to lp:~you/pkgme-service/something |
735 | + and then specify that as the branch. |
736 | + |
737 | + * pkgme_branch: the branch of pkgme to deploy, defaults to |
738 | + lp:pkgme. |
739 | + |
740 | + * pkgme_binary_branch: the branch of pkgme-binary to deploy, |
741 | + defaults to lp:pkgme-binary. |
742 | + |
743 | + * use_staging_deps: whether to use the staging PPA for |
744 | + dependencies, as well as the production one, defaults to True. |
745 | + |
746 | + Arguments are all specified by attaching them to the command name, e.g. |
747 | + |
748 | + fab deploy_to_ec2:keypair=ec2-keypair,branch=lp:~me/pkgme-service/something |
749 | + """ |
750 | + instance = _new_ec2_instance(keypair) |
751 | + _set_instance_as_host(instance) |
752 | + deploy_to_existing(branch=branch, use_staging_deps=use_staging_deps, pkgme_branch=pkgme_branch, pkgme_binary_branch=pkgme_binary_branch) |
753 | + |
754 | + |
755 | +def _set_instance_as_host(instance): |
756 | + """Set the host to be acted on by fab to this instance.""" |
757 | + env.host_string = "%s@%s:22" % (USERNAME, instance.dns_name) |
758 | + |
759 | + |
760 | +def deploy_to_existing(branch="lp:pkgme-service", use_staging_deps=True, pkgme_branch="lp:pkgme", pkgme_binary_branch="lp:pkgme-binary"): |
761 | + """Deploy to an existing instance. |
762 | + |
763 | + This command will deploy to an existing instance. Don't use it on an |
764 | + instance you care about as it may overwrite anything. |
765 | + |
766 | + To specify the host to deploy to use the -H option of fab: |
767 | + |
768 | + fab -H ubuntu@<host> deploy_to_existing |
769 | + |
770 | + You can also specify the following arguments: |
771 | + |
772 | + * branch: the branch to deploy, defaults to lp:pkgme-service. To |
773 | + test some in-progress work push to lp:~you/pkgme-service/something |
774 | + and then specify that as the branch. |
775 | + |
776 | + * pkgme_branch: the branch of pkgme to deploy, defaults to |
777 | + lp:pkgme. |
778 | + |
779 | + * pkgme_binary_branch: the branch of pkgme-binary to deploy, |
780 | + defaults to lp:pkgme-binary. |
781 | + |
782 | + * use_staging_deps: whether to use the staging PPA for |
783 | + dependencies, as well as the production one, defaults to True. |
784 | + |
785 | + Arguments are all specified by attaching them to the command name, e.g. |
786 | + |
787 | + fab deploy_to_ec2:keypair=ec2-keypair,branch=lp:~me/pkgme-service/something |
788 | + """ |
789 | + # Get the puppet used by IS |
790 | + run('sudo add-apt-repository ppa:canonical-sysadmins/puppet') |
791 | + # Add our dependency PPA |
792 | + run('sudo add-apt-repository ppa:canonical-ca-hackers/production') |
793 | + if use_staging_deps: |
794 | + # Add our dependency staging PPA |
795 | + run('sudo add-apt-repository ppa:canonical-ca-hackers/staging') |
796 | + run('sudo apt-get update -q') |
797 | + # Upgrade the base system in case we are shipping any updates in |
798 | + # our PPAs |
799 | + run('sudo apt-get dist-upgrade -q -y --force-yes') |
800 | + # Avoid a debconf note when installing rabbitmq-server on lucid |
801 | + run('echo "rabbitmq-server rabbitmq-server/upgrade_previous note" | sudo debconf-set-selections') |
802 | + # Install the dependencies needed to get puppet going |
803 | + # TODO: move the rest of the dependencies to puppet |
804 | + run('sudo apt-get install -q -y --force-yes pkgme-service-dependencies bzr apache2 libapache2-mod-wsgi rabbitmq-server postgresql-8.4 puppet') |
805 | + # Grab the branches |
806 | + # TODO: investigate re-using IS' config-manager config |
807 | + run('bzr branch -q %s pkgme-service' % branch) |
808 | + run('bzr branch -q %s pkgme-service/sourcecode/pkgme' % pkgme_branch) |
809 | + run('bzr branch -q %s pkgme-service/sourcecode/pkgme-binary' % pkgme_binary_branch) |
810 | + run('cd pkgme-service/sourcecode/pkgme && python setup.py build') |
811 | + run('cd pkgme-service/sourcecode/pkgme-binary && python setup.py build') |
812 | + # Grab canonical-memento and use it? |
813 | + # Run puppet to set everything else up. |
814 | + run('./pkgme-service/example_config/apply') |
815 | + |
816 | + |
817 | +def _get_started_ec2_instances(): |
818 | + """Get the ec2 instances started from here.""" |
819 | + key_id, secret_access_key = _get_aws_credentials() |
820 | + conn = ec2.connect_to_region(REGION_NAME, aws_access_key_id=key_id, |
821 | + aws_secret_access_key=secret_access_key) |
822 | + for reservation in conn.get_all_instances(): |
823 | + for instance in reservation.instances: |
824 | + if instance.state == 'running': |
825 | + tags = getattr(instance, "tags", {}) |
826 | + if "name" in tags: |
827 | + name = tags["name"] |
828 | + if name.startswith(NAME_PREFIX): |
829 | + yield instance |
830 | + |
831 | + |
832 | +def destroy_ec2_instances(): |
833 | + """Destroy any ec2 instances created by this deployment.""" |
834 | + for instance in _get_started_ec2_instances(): |
835 | + puts("Stopping %s" % instance.id) |
836 | + instance.terminate() |
837 | + |
838 | + |
839 | +def last_ec2_launched(): |
840 | + """Cause other commands to act on the last ec2 instance launched from here. |
841 | + |
842 | + Use this in a list of commands to have the rest of the commands act on the |
843 | + last ec2 instance launched with deploy_to_ec2, e.g. |
844 | + |
845 | + fab -i ~/.ec2/ec2-keypair.pem last_ec2_launched -- ls |
846 | + |
847 | + This will then run ls on that instance. |
848 | + """ |
849 | + def get_date(instance): |
850 | + return instance.tags["name"][len(NAME_PREFIX):] |
851 | + last_instance = None |
852 | + for instance in _get_started_ec2_instances(): |
853 | + if last_instance is None: |
854 | + last_instance = instance |
855 | + else: |
856 | + if get_date(instance) > get_date(last_instance): |
857 | + last_instance = instance |
858 | + if last_instance is None: |
859 | + abort("No instances found.") |
860 | + _set_instance_as_host(last_instance) |