Merge lp:~chad.smith/vmbuilder/jenkins_kvm_azure_netplan_hotplug into lp:vmbuilder

Proposed by Chad Smith
Status: Superseded
Proposed branch: lp:~chad.smith/vmbuilder/jenkins_kvm_azure_netplan_hotplug
Merge into: lp:vmbuilder
Diff against target: 31474 lines (+30578/-0)
176 files modified
azure_config.sh (+95/-0)
base_indicies.sh (+28/-0)
build-juju-local.sh (+108/-0)
builder_config.sh (+78/-0)
checksum.sh (+27/-0)
config/cloud-azure.cfg (+9/-0)
config/cloud-maas.cfg (+11/-0)
config/cloud-maasv2.cfg (+10/-0)
config/cloud-maasv3.cfg (+10/-0)
config/cloud-precise.cfg (+24/-0)
config/cloud-trusty-pp64el.cfg (+13/-0)
config/cloud-trusty.cfg (+26/-0)
config/cloud-vps.cfg (+6/-0)
config/cloud.cfg (+17/-0)
copy_to_final.sh (+52/-0)
create-vhd.sh (+97/-0)
ec2_publisher.sh (+98/-0)
functions/bzr_check.sh (+14/-0)
functions/bzr_commit.sh (+23/-0)
functions/common (+37/-0)
functions/locker (+49/-0)
functions/merge_templates (+53/-0)
functions/mk_template.sh (+41/-0)
functions/retry (+16/-0)
generate-ubuntu-lists.sh (+44/-0)
get_serial.sh (+157/-0)
jenkins/CloudImages_Azure.sh (+162/-0)
jenkins/CloudImages_Base.sh (+96/-0)
jenkins/CloudImages_Base_Release_Delta.sh (+255/-0)
jenkins/CloudImages_Juju.sh (+253/-0)
jenkins/CloudImages_Update_Builder.sh (+68/-0)
jenkins/CloudImages_Vagrant.sh (+232/-0)
jenkins/MAAS_Builder.sh (+171/-0)
jenkins/MAAS_Promotion.sh (+31/-0)
jenkins/MAASv2_Builder.sh (+191/-0)
jenkins/MAASv2_Cleaner.sh (+55/-0)
jenkins/MAASv3_Builder.sh (+67/-0)
jenkins/Promote_Daily.sh (+55/-0)
jenkins/Promote_MAAS_Daily.sh (+48/-0)
jenkins/Publish_EC2.sh (+64/-0)
jenkins/Publish_Results_to_Tracker.sh (+34/-0)
jenkins/README.txt (+1/-0)
jenkins/Test_Azure.sh (+17/-0)
jenkins/build_lib.sh (+33/-0)
jenkins/env-test.sh (+2/-0)
launch_kvm.sh (+222/-0)
maas_config.sh (+75/-0)
make-seed.sh (+147/-0)
overlay.sh (+23/-0)
pylib/changelogger.py (+222/-0)
pylib/changelogger/ChangeLogger.py (+222/-0)
pylib/requests/__init__.py (+77/-0)
pylib/requests/adapters.py (+388/-0)
pylib/requests/api.py (+120/-0)
pylib/requests/auth.py (+193/-0)
pylib/requests/cacert.pem (+5026/-0)
pylib/requests/certs.py (+24/-0)
pylib/requests/compat.py (+115/-0)
pylib/requests/cookies.py (+454/-0)
pylib/requests/exceptions.py (+75/-0)
pylib/requests/hooks.py (+45/-0)
pylib/requests/models.py (+803/-0)
pylib/requests/packages/__init__.py (+3/-0)
pylib/requests/packages/chardet/__init__.py (+32/-0)
pylib/requests/packages/chardet/big5freq.py (+925/-0)
pylib/requests/packages/chardet/big5prober.py (+42/-0)
pylib/requests/packages/chardet/chardetect.py (+46/-0)
pylib/requests/packages/chardet/chardistribution.py (+231/-0)
pylib/requests/packages/chardet/charsetgroupprober.py (+106/-0)
pylib/requests/packages/chardet/charsetprober.py (+62/-0)
pylib/requests/packages/chardet/codingstatemachine.py (+61/-0)
pylib/requests/packages/chardet/compat.py (+34/-0)
pylib/requests/packages/chardet/constants.py (+39/-0)
pylib/requests/packages/chardet/cp949prober.py (+44/-0)
pylib/requests/packages/chardet/escprober.py (+86/-0)
pylib/requests/packages/chardet/escsm.py (+242/-0)
pylib/requests/packages/chardet/eucjpprober.py (+90/-0)
pylib/requests/packages/chardet/euckrfreq.py (+596/-0)
pylib/requests/packages/chardet/euckrprober.py (+42/-0)
pylib/requests/packages/chardet/euctwfreq.py (+428/-0)
pylib/requests/packages/chardet/euctwprober.py (+41/-0)
pylib/requests/packages/chardet/gb2312freq.py (+472/-0)
pylib/requests/packages/chardet/gb2312prober.py (+41/-0)
pylib/requests/packages/chardet/hebrewprober.py (+283/-0)
pylib/requests/packages/chardet/jisfreq.py (+569/-0)
pylib/requests/packages/chardet/jpcntx.py (+219/-0)
pylib/requests/packages/chardet/langbulgarianmodel.py (+229/-0)
pylib/requests/packages/chardet/langcyrillicmodel.py (+329/-0)
pylib/requests/packages/chardet/langgreekmodel.py (+225/-0)
pylib/requests/packages/chardet/langhebrewmodel.py (+201/-0)
pylib/requests/packages/chardet/langhungarianmodel.py (+225/-0)
pylib/requests/packages/chardet/langthaimodel.py (+200/-0)
pylib/requests/packages/chardet/latin1prober.py (+139/-0)
pylib/requests/packages/chardet/mbcharsetprober.py (+86/-0)
pylib/requests/packages/chardet/mbcsgroupprober.py (+54/-0)
pylib/requests/packages/chardet/mbcssm.py (+575/-0)
pylib/requests/packages/chardet/sbcharsetprober.py (+120/-0)
pylib/requests/packages/chardet/sbcsgroupprober.py (+69/-0)
pylib/requests/packages/chardet/sjisprober.py (+91/-0)
pylib/requests/packages/chardet/universaldetector.py (+170/-0)
pylib/requests/packages/chardet/utf8prober.py (+76/-0)
pylib/requests/packages/urllib3/__init__.py (+58/-0)
pylib/requests/packages/urllib3/_collections.py (+205/-0)
pylib/requests/packages/urllib3/connection.py (+204/-0)
pylib/requests/packages/urllib3/connectionpool.py (+710/-0)
pylib/requests/packages/urllib3/contrib/ntlmpool.py (+120/-0)
pylib/requests/packages/urllib3/contrib/pyopenssl.py (+422/-0)
pylib/requests/packages/urllib3/exceptions.py (+126/-0)
pylib/requests/packages/urllib3/fields.py (+177/-0)
pylib/requests/packages/urllib3/filepost.py (+100/-0)
pylib/requests/packages/urllib3/packages/__init__.py (+4/-0)
pylib/requests/packages/urllib3/packages/ordered_dict.py (+260/-0)
pylib/requests/packages/urllib3/packages/six.py (+385/-0)
pylib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py (+13/-0)
pylib/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py (+105/-0)
pylib/requests/packages/urllib3/poolmanager.py (+258/-0)
pylib/requests/packages/urllib3/request.py (+141/-0)
pylib/requests/packages/urllib3/response.py (+308/-0)
pylib/requests/packages/urllib3/util/__init__.py (+27/-0)
pylib/requests/packages/urllib3/util/connection.py (+45/-0)
pylib/requests/packages/urllib3/util/request.py (+68/-0)
pylib/requests/packages/urllib3/util/response.py (+13/-0)
pylib/requests/packages/urllib3/util/ssl_.py (+133/-0)
pylib/requests/packages/urllib3/util/timeout.py (+234/-0)
pylib/requests/packages/urllib3/util/url.py (+162/-0)
pylib/requests/sessions.py (+637/-0)
pylib/requests/status_codes.py (+88/-0)
pylib/requests/structures.py (+127/-0)
pylib/requests/utils.py (+673/-0)
register-vagrant-version.sh (+107/-0)
rss-cleanup.sh (+16/-0)
rss-generate.sh (+103/-0)
should_build.py (+484/-0)
standalone.sh (+303/-0)
templates/default.tmpl (+420/-0)
templates/example-addin.tmpl (+140/-0)
templates/handle-xdeb.py (+15/-0)
templates/img-azure-12.04-addin.tmpl (+47/-0)
templates/img-azure-14.04-addin.tmpl (+58/-0)
templates/img-azure-14.10-addin.tmpl (+60/-0)
templates/img-azure-15.04-addin.tmpl (+59/-0)
templates/img-azure-15.10-addin.tmpl (+50/-0)
templates/img-azure-15.10-docker.tmpl (+25/-0)
templates/img-azure-16.04-addin.tmpl (+58/-0)
templates/img-azure-16.04-docker.tmpl (+8/-0)
templates/img-azure-16.10-addin.tmpl (+58/-0)
templates/img-azure-16.10-docker.tmpl (+8/-0)
templates/img-azure-17.04-addin.tmpl (+58/-0)
templates/img-azure-17.10-addin.tmpl (+58/-0)
templates/img-azure-18.04-addin.tmpl (+58/-0)
templates/img-azure-extra.tmpl (+19/-0)
templates/img-azure.tmpl (+354/-0)
templates/img-build.tmpl (+135/-0)
templates/img-extra-nets.tmpl (+141/-0)
templates/img-juju-addin.tmpl (+250/-0)
templates/img-juju.tmpl (+455/-0)
templates/img-maas.tmpl (+96/-0)
templates/img-maasv2.tmpl (+137/-0)
templates/img-maasv3.tmpl (+85/-0)
templates/img-smartcloud.tmpl (+112/-0)
templates/img-update.tmpl (+292/-0)
templates/img-vagrant.tmpl (+294/-0)
templates/img-vps.tmpl (+67/-0)
tests/azure-node-settings-tool.py (+111/-0)
tests/azure.sh (+286/-0)
tests/decider.py (+285/-0)
tests/jenkins-ssh (+68/-0)
tests/passless-sudoifer (+57/-0)
tests/run-azure.sh (+29/-0)
tests/test-azure.py (+233/-0)
tests/tracker.py (+187/-0)
tracker.sh (+16/-0)
tweet.sh (+44/-0)
ubuntu-adj2version (+53/-0)
update_release_directory.sh (+17/-0)
wait_package.sh (+27/-0)
To merge this branch: bzr merge lp:~chad.smith/vmbuilder/jenkins_kvm_azure_netplan_hotplug
Reviewer Review Type Date Requested Status
VMBuilder Pending
Review via email: mp+347174@code.launchpad.net

This proposal has been superseded by a proposal from 2018-05-31.

Commit message

Update Azure's nic hotplug script to use netplan if available instead of ENI

Also avoid appending unnecessary include directives in
/etc/network/interfaces on netplan-enabled systems.

Description of the change

WIP: I think I targeted the wrong branch, will resubmit tomorrow
diff should be http://paste.ubuntu.com/p/Kx8YrXv7cp/

Azure images deliver a script, /usr/local/sbin/ephemeral_eth.sh, which is called from udev add rules on NIC hotplug events for NICs named eth[1-9]*. This script predates netplan and, as such, only cared about /etc/network/interfaces.

In Bionic and later, cloud-init writes a fallback interface configuration to /etc/netplan/50-cloud-init.yaml, configuring DHCP on eth0 as a primary/mandatory NIC (optional: false), so boot will wait on that device to come up.

This changeset adds a check in ephemeral_eth.sh for whether the netplan command exists:
  - If netplan is present, a separate /etc/netplan/90-hotplug-<nicname>.yaml will be generated for each network device eth[1-9]* that is attached after cloud-init's initial network configuration.
  - After the netplan yaml is created, 'netplan apply' is called to bring up that device with DHCP. The netplan config will specify that these NICs are "optional: true" so that subsequent boots will not wait on them to come up in case they are later detached. A sketch of such a generated file is shown below.
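
For illustration, a minimal sketch of what one such generated file might look like for eth1 (hypothetical content; the exact keys the script emits may differ):

    # /etc/netplan/90-hotplug-eth1.yaml (illustrative)
    network:
        version: 2
        ethernets:
            eth1:
                dhcp4: true
                optional: true

After writing the file, the script runs 'netplan apply' so the newly attached device comes up without a reboot.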

Attaching NICs in Azure is done through the Azure UI or API. Attach/detach operations in Azure require the instance to be stopped before the attach/detach and started again afterward.

  Potential gap:
     There is no attempt to clean up old netplan yaml files or to designate a new primary/mandatory NIC, because the original hotplug script didn't deal with udev rules for hotplug removal of NICs (via Azure network interface detachment).
     This could present a minor issue if eth1 is attached (and optional by design) and eth0 gets detached. In that case, systemd may still wait for eth0 to come up because of the mandatory eth0 definition in /etc/netplan/50-cloud-init.yaml (a sketch of that fallback definition follows below).
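
For reference, a minimal sketch of the kind of fallback definition cloud-init writes to /etc/netplan/50-cloud-init.yaml on such an instance (the MAC address is illustrative):

    network:
        version: 2
        ethernets:
            eth0:
                dhcp4: true
                match:
                    macaddress: 00:0d:3a:00:00:01
                set-name: eth0

Because optional is not set (it defaults to false), boot will wait on this definition even if the matching NIC has been detached.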

Revision history for this message
Daniel Axtens (daxtens) wrote :

A few things:

1) netplan is the default on Artful too. I think your detection code is right, but your commit message is potentially wrong?

2) If I understand cloud-init and netplan correctly, couldn't you achieve the same effect by just adding this as /etc/netplan/99-azure-hotplug.yaml? Then you could drop ephemeral_eth.sh entirely on Artful and Bionic.

network:
    version: 2
    ethernets:
        ephemeral:
            dhcp4: true
            match:
                driver: hv_netvsc
                name: "eth*"
            optional: true

3) Looking at the code itself, you should probably use /run/netplan for ephemeral files, rather than /etc/netplan. That also solves your cleanup problem.

4) And it's worth knowing that netplan apply will look for network devices that are 'down', unbind them from their drivers, and rebind them. With your approach, netplan apply will be run for each extra device, so if there are 4 extra devices, the first one configured won't be replugged, the second will be replugged once, the third will be replugged twice, and so on. This *probably* isn't problematic, but it makes me nervous, especially doing it in rapid succession.

800. By Chad Smith

Revert changes to ephemeral_eth.sh and emit a netplan 90-hotplug-azure.yaml

cloud-init only sets up a network configuration at initial boot, pinned to
the original MAC address. If we are building a netplan-enabled image,
emit a static netplan yaml which will complement the original /etc/netplan/50-cloud-init.yaml fallback definition. If the original eth0 is no longer attached to the VM, cloud-init's netplan yaml will not match by MAC address and the system will fall through to match the following hotpluggedeth0 definition:

        hotpluggedeth0:
            dhcp4: true
            match:
                driver: hv_netvsc
                name: 'eth0'
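
For context, a minimal sketch of how that stanza might sit in the emitted /etc/netplan/90-hotplug-azure.yaml (the surrounding network/version/ethernets keys are assumed from standard netplan structure, not quoted from the diff):

    network:
        version: 2
        ethernets:
            hotpluggedeth0:
                dhcp4: true
                match:
                    driver: hv_netvsc
                    name: 'eth0'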

801. By Chad Smith

Move /etc/network/interfaces include directive back out of config_udev.

The appended include directive in /etc/network/interfaces needs to exist for
both the upstart and udev solutions, so it can't live exclusively within the
config_udev_or_netplan function. It needs to be present in all non-netplan
environments (upstart and ENI), but we test that the image is not
netplan-enabled before manipulating /etc/network/interfaces. A sketch of that guard is shown below.
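
A minimal sketch of the kind of guard described above (the exact include directive text and the netplan detection used in the template may differ; this is illustrative, not quoted from the diff):

    # Only append the include directive on non-netplan images
    if ! command -v netplan >/dev/null 2>&1; then
        grep -q '^source /etc/network/interfaces.d/\*.cfg' /etc/network/interfaces ||
            echo 'source /etc/network/interfaces.d/*.cfg' >> /etc/network/interfaces
    fi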

Unmerged revisions

801. By Chad Smith

Move /etc/network/interfaces include directive back out of config_udev.

The appended include directive in /etc/network/interfaces needs to exist for
both the upstart and udev solutions, so it can't live exclusively within the
config_udev_or_netplan function. It needs to be present in all non-netplan
environments (upstart and ENI), but we test that the image is not
netplan-enabled before manipulating /etc/network/interfaces.

800. By Chad Smith

Revert changes to ephemeral_eth.sh and emit a netplan 90-hotplug-azure.yaml

cloud-init only sets up a network configuration at initial boot, pinned to
the original MAC address. If we are building a netplan-enabled image,
emit a static netplan yaml which will complement the original /etc/netplan/50-cloud-init.yaml fallback definition. If the original eth0 is no longer attached to the VM, cloud-init's netplan yaml will not match by MAC address and the system will fall through to match the following hotpluggedeth0 definition:

        hotpluggedeth0:
            dhcp4: true
            match:
                driver: hv_netvsc
                name: 'eth0'

799. By Chad Smith

Update Azure's nic hotplug script to use netplan if available instead of ENI

Also avoid appending unnecessary include directives in
/etc/network/interfaces on netplan-enabled systems.

798. By Dan Watkins

Install Azure model assertion in Azure bionic images

797. By Dan Watkins

Install linux-azure in bionic Azure images [a=Odd_Bloke][r=fginther,tribaal]

MP: https://code.launchpad.net/~ubuntu-on-ec2/vmbuilder/jenkins_kvm-oddbloke/+merge/341846

796. By Philip Roche

Merge lp:~ubuntu-on-ec2/vmbuilder/jenkins_kvm-oddbloke into lp:~ubuntu-on-ec2/vmbuilder/jenkins_kvm [a=daniel-thewatkins] [r=fginther,philroche]

Use HTTPS for Vagrant box redirects (LP: #1754948)

MP: https://code.launchpad.net/~ubuntu-on-ec2/vmbuilder/jenkins_kvm-oddbloke/+merge/341339

795. By Dan Watkins

Drop unscd from bionic Azure images [a=Odd_Bloke][r=fginther,philroche]

MP: https://code.launchpad.net/~daniel-thewatkins/vmbuilder/jenkins_kvm-drop-unscd/+merge/337830

794. By Dan Watkins

do not explicitly install cloud-init [a=mwhudson][r=fginther,Odd_Bloke,philroche]

MP: https://code.launchpad.net/~mwhudson/vmbuilder/jenkins_kvm.mwhudson/+merge/334878

793. By Francis Ginther

Update source image and package set for artful. Dropping packages that no longer exist.
[a=fginther][r=daniel-thewatkins,philroche,tribaal]

MP: https://code.launchpad.net/~fginther/vmbuilder/new-artful-builder/+merge/332487

792. By Francis Ginther

Add a bb-series version of the Azure suite specific template files, img-azure-18.04-addin.tmpl.
[a=fginther][r=daniel-thewatkins,rcj]

MP: https://code.launchpad.net/~fginther/vmbuilder/jenkins_kvm-add-azure-18.04/+merge/332368

Preview Diff

1=== added file 'azure_config.sh'
2--- azure_config.sh 1970-01-01 00:00:00 +0000
3+++ azure_config.sh 2018-05-31 04:33:07 +0000
4@@ -0,0 +1,95 @@
5+#!/bin/bash
6+
7+# Load up some libraries
8+my_dir="$( cd "$( dirname "$0" )" && pwd )"
9+source "${my_dir}/functions/locker"
10+source "${my_dir}/functions/common"
11+source "${my_dir}/functions/retry"
12+source "${my_dir}/functions/merge_templates"
13+
14+usage() {
15+ cat <<EOM
16+${0##/} - Populated values in build temple.
17+
18+ Required:
19+ --template Template file
20+ --extra Extra, arbitrary addin
21+ --serial The build serial
22+ --out The output file
23+ --tar Name of tar file
24+ --tar-d Name of directory to tar up
25+ --version The version number of the distro
26+ --proposed Build against proposed
27+ --docker Install Docker/Docker compose
28+EOM
29+}
30+
31+short_opts="h"
32+long_opts="out:,template:,serial:,tar:,tar-d:,version:,proposed,docker,extra:"
33+getopt_out=$(getopt --name "${0##*/}" \
34+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
35+ eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
36+
37+serial=${SERIAL:-$(date +%Y%m%d)}
38+
39+# Standard templates
40+template_f="$(readlink -f ${0%/*}/templates/img-azure.tmpl)"
41+template_netaddin_f="$(readlink -f ${0%/*}/templates/img-extra-nets.tmpl)"
42+template_extra_f="$(readlink -f ${0%/*}/templates/img-azure-extra.tmpl)"
43+extra_addins=()
44+
45+while [ $# -ne 0 ]; do
46+ cur=${1}; next=${2};
47+ case "$cur" in
48+ --template) template_f=$2; shift;;
49+ --extra) extra_addins+=($2); shift;;
50+ --serial) serial=$2; shift;;
51+ --tar) tar_f=$2; shift;;
52+ --tar-d) tar_d=$2; shift;;
53+ --out) out_f=$2; shift;;
54+ --version) version=$2; shift;;
55+ --proposed) proposed="true";;
56+ --docker) docker="1";;
57+ --) shift; break;;
58+ esac
59+ shift;
60+done
61+
62+fail() { echo "${@}" 2>&1; exit 1;}
63+fail_usage() { fail "Must define $@"; }
64+
65+# Create the template file for image conversion
66+sed -e "s,%S,${serial},g" \
67+ -e "s,%v,${version},g" \
68+ -e "s,%P,${proposed:-false},g" \
69+ ${template_f} > ${out_f}.base ||
70+ fail "Unable to write template file"
71+
72+# Support per-suite addins
73+net_addin=1
74+
75+# Disable the extra nets for Azure due due to the systemd changes
76+dist_ge ${version} vivid && net_addin=0
77+
78+# Order the addins
79+default_addin="${template_f//.tmpl/}-${version}-addin.tmpl"
80+docker_addin="${template_f//.tmpl/}-${version}-docker.tmpl"
81+
82+addins=(${default_addin})
83+[ ${net_addin:-0} -eq 1 ] && addins+=("${template_netaddin_f}")
84+[ ${docker:-0} -eq 1 -a -f "${docker_addin}" ] && addins+=("${docker_addin}")
85+addins+=("${extra_addins[@]}" "${template_extra_f}")
86+
87+merge_templates ${out_f}.base ${out_f} ${addins[@]}
88+
89+debug "=================================================="
90+debug "Content of template:"
91+cat ${out_f}
92+debug "=================================================="
93+
94+if [ -n "${tar_d}" ]; then
95+ tar -C "${tar_d}" -cf "${tar_f}" . &&
96+ debug "TAR'd up ${tar_d}" ||
97+ fail "Failed to tar up ${tar_d}"
98+fi
99+exit 0
100
101=== added file 'base_indicies.sh'
102--- base_indicies.sh 1970-01-01 00:00:00 +0000
103+++ base_indicies.sh 2018-05-31 04:33:07 +0000
104@@ -0,0 +1,28 @@
105+#!/bin/bash -xe
106+#
107+# Simple job for creating indicies
108+suite="${1:-$SUITE}"
109+serial="${2:-$SERIAL}"
110+
111+umask 022
112+cronrun="/srv/builder/vmbuilder/bin/cronrun"
113+
114+# Override and set some home variables
115+export HOME="/srv/builder/vmbuilder"
116+export CDIMAGE_BIN="${CDIMAGE_BIN:-$HOME/cdimage/bin}"
117+export CDIMAGE_ROOT="${CDIMAGE_ROOT:-$HOME/cdimage}"
118+export PUBLISH_SCRIPTS="${PUBLISH_SCRIPTS:-$HOME/ec2-publishing-scripts}"
119+export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
120+
121+fail() { echo "${@}" 2>&1; exit 1;}
122+
123+echo "Checksumming result directories"
124+work_d="${WORKD:-/srv/ec2-images}/${suite}/${serial}"
125+
126+${CDIMAGE_BIN}/checksum-directory "${work_d}" &&
127+ checksum-directory "${work_d}/unpacked" ||
128+ fail "Failed to checksum result directories"
129+
130+${PUBLISH_SCRIPTS}/update-build-indexes daily ${work_d} ${suite} &&
131+ update-build-indexes daily ${work_d} ${suite} ||
132+ fail "Failed to make the indexes for ${work_d}"
133
134=== added file 'build-juju-local.sh'
135--- build-juju-local.sh 1970-01-01 00:00:00 +0000
136+++ build-juju-local.sh 2018-05-31 04:33:07 +0000
137@@ -0,0 +1,108 @@
138+#!/bin/bash
139+
140+# Read in the common files
141+myname=$(readlink -f ${0})
142+mydir=$(dirname ${myname})
143+mypdir=$(dirname ${mydir})
144+
145+# Scope stuff locally here
146+# Create a temporary directory for the fun
147+tmp_dir=$(mktemp -d builder.XXXXX --tmpdir=${TMPDIR:-/tmp})
148+export TMPDIR=${tmp_dir}
149+export WORKSPACE=${mydir}
150+export HOME=${mydir}
151+export LOCAL_BUILD=1
152+
153+clean() { [ -d ${tmp_dir} ] && rm -rf ${tmp_dir};
154+ [ -d "${mydir}/Virtualbox\ VMS" ] && rm -rf "${mydir}/Virtualbox\ VMS";
155+ exit "${@}"; }
156+error() { echo "$@"; }
157+debug() { error "$(date -R):" "$@"; }
158+fail() { debug "${1:-Something bad happend}"; clean 1; }
159+
160+# Fly with the safety on!
161+trap fail EXIT
162+trap fail SIGINT
163+
164+test_cmd_exists() {
165+ which $1 >> /dev/null || fail "Command $1 does not exist! Please install $2"
166+}
167+
168+if [ "$(lsb_release -r -s | sed 's/\.//')" -lt 1404 ]; then
169+ fail "This must be run on Ubuntu 14.04 or higher"
170+fi
171+
172+test_cmd_exists qemu-nbd qemu-utils
173+test_cmd_exists vboxmanage virtualbox
174+test_cmd_exists bzr bzr
175+test_cmd_exists sstream-query simplestreams
176+
177+# This defines what gets built
178+build_for=(${BUILD_FOR:-trusty:amd64 precise:amd64})
179+[ -n "${JUJU_CORE_PKG}" -o -n "${JUJU_LOCAL_PKG}" ] && \
180+ [ ${#build_for[@]} -ge 2 ] && \
181+ fail "JUJU_CORE_PKG and JUJU_LOCAL_PKG can be specified only for a single build target."
182+
183+for build in ${build_for[@]};
184+do
185+ suite=${build%%:*}
186+ arch=${build##*:}
187+ builder_img="${mydir}/${suite}-builder-${arch}.img"
188+ results_d_arch="${mydir}/${suite}-${arch}"
189+ built_img="${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.img"
190+
191+ [ ! -e "${results_d_arch}" ] &&
192+ mkdir -p "${results_d_arch}"
193+
194+ cmd=(
195+ "${mydir}/standalone.sh"
196+ "--cloud_cfg ${mydir}/config/cloud-vps.cfg"
197+ "--template ${mydir}/templates/img-juju.tmpl"
198+ "--suite ${suite}"
199+ "--arch ${arch}"
200+ "--use_img ${builder_img}"
201+ "--final_img ${built_img}"
202+ "--resize_final 40"
203+ )
204+
205+ [ ! -e "${builder_img}" ] && cmd+=("--fetch_new")
206+ if [ -n "${JUJU_CORE_PKG}" -o -n "${JUJU_LOCAL_PKG}" ]; then
207+ cmd+=("--cloud-init-file ${mydir}/templates/handle-xdeb.py:text/part-handler")
208+ if [ -n "${JUJU_CORE_PKG}" ]; then
209+ cmd+=("--cloud-init-file ${JUJU_CORE_PKG}:application/x-deb")
210+ echo "JUJU_CORE_PKG=$(basename $JUJU_CORE_PKG)" > ${tmp_dir}/juju-sources.sh
211+ fi
212+ if [ -n "${JUJU_LOCAL_PKG}" ]; then
213+ cmd+=("--cloud-init-file ${JUJU_LOCAL_PKG}:application/x-deb")
214+ echo "JUJU_LOCAL_PKG=$(basename $JUJU_LOCAL_PKG)" >> ${tmp_dir}/juju-sources.sh
215+ fi
216+ cmd+=("--cloud-init-file ${tmp_dir}/juju-sources.sh:application/x-shellscript")
217+ fi
218+
219+ [ -e "${results_d_arch}/${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.img" ] ||
220+ ( cd ${results_d_arch} && ${cmd[@]} )
221+
222+ # The following Vagrant-ifies the build
223+ SUITE=${suite} \
224+ ARCH_TYPE=${arch} \
225+ SERIAL="current" \
226+ SRV_D="${mydir}/${suite}-${arch}" \
227+ OUTPUT_D="${mydir}/${suite}-${arch}" \
228+ WORKSPACE="${mydir}/${suite}-${arch}" \
229+ ${mydir}/jenkins/CloudImages_Juju.sh
230+
231+ expected_box="${results_d_arch}/${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.box"
232+ [ -f "${expected_box}" ] || fail "unable to find ${expected_box}; build failed!"
233+ results_out+=("${build} ${expected_box}")
234+done
235+
236+# Clear the traps
237+trap - EXIT
238+trap - SIGINT
239+trap
240+
241+debug "Results are in following locations"
242+echo -e "${results_out[@]}"
243+
244+debug "Done with the build!"
245+clean 0
246
247=== added file 'builder_config.sh'
248--- builder_config.sh 1970-01-01 00:00:00 +0000
249+++ builder_config.sh 2018-05-31 04:33:07 +0000
250@@ -0,0 +1,78 @@
251+#!/bin/bash
252+short_opts="h"
253+long_opts="distro:,arch:,build-type:,bzr-automated-builds:,bzr-pubscripts:,bzr-livebuild:,bzr-vmbuilder:,out:,template:,serial:,proposed"
254+getopt_out=$(getopt --name "${0##*/}" \
255+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
256+ eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
257+
258+usage() {
259+ cat <<EOM
260+${0##/} - Populated values in build temple.
261+
262+ Required:
263+ --distro Distro code name, i.e. precise
264+ --arch Arch, i.e. amd64, i386, armel, armhf
265+ --template Template file
266+ --serial The build serial
267+ --out The output file
268+ --proposed Build against -proposed
269+
270+ Optional:
271+ --bzr-automated-builds bzr branch for automated ec2 builds
272+ --bzr-pubscripts bzr branch of EC2 Publishing Scripts
273+ --bzr-livebuild bzr branch of live-builder
274+ --bzr-vmbuilder bzr branch of vmbuilder
275+EOM
276+}
277+
278+
279+fail() { echo "${@}" 2>&1; exit 1;}
280+
281+serial=$(date +%Y%m%d)
282+bzr_automated_builds="http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/automated-ec2-builds"
283+bzr_pubscripts="http://bazaar.launchpad.net/~ubuntu-on-ec2/ubuntu-on-ec2/ec2-publishing-scripts"
284+bzr_livebuild="http://bazaar.launchpad.net/~ubuntu-on-ec2/live-build/cloud-images"
285+bzr_vmbuilder="http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/0.11a"
286+template_f="${PWD}/img-build.tmpl"
287+proposed=0
288+
289+while [ $# -ne 0 ]; do
290+ cur=${1}; next=${2};
291+ case "$cur" in
292+ --distro) distro=$2; shift;;
293+ --arch) arch=$2; shift;;
294+ --build-type) build_type=$2; shift;;
295+ --bzr-automated-builds) bzr_automated_builds=$2; shift;;
296+ --bzr-pubscripts) bzr_pubscripts=$2; shift;;
297+ --bzr-livebuild) bzr_livebuild=$2; shift;;
298+ --bzr-vmbuilder) bzr_vmbuilder=$2; shift;;
299+ --template) template_f=$2; shift;;
300+ --serial) serial=$2; shift;;
301+ --out) out_f=$2; shift;;
302+ --proposed) proposed=1;;
303+ --) shift; break;;
304+ esac
305+ shift;
306+done
307+
308+fail_usage() { fail "Must define $@"; }
309+
310+[ -z "${distro}" ] && fail_usage "--distro"
311+[ -z "${arch}" ] && fail_usage "--arch"
312+[ -z "${build_type}" ] && fail_usage "--build-type"
313+[ -z "${out_f}" ] && fail_usage "--out"
314+
315+sed -e "s,%d,${distro},g" \
316+ -e "s,%a,${arch},g" \
317+ -e "s,%b,${build_type},g" \
318+ -e "s,%A,${bzr_automated_builds},g" \
319+ -e "s,%P,${bzr_pubscripts},g" \
320+ -e "s,%L,${bzr_livebuild},g" \
321+ -e "s,%V,${bzr_vmbuilder},g" \
322+ -e "s,%S,${serial},g" \
323+ -e "s,%p,${proposed:-0},g" \
324+ -e "s,%C,$(awk 1 ORS='\\n' < "${HOME}/.lp_creds")," \
325+ ${template_f} > ${out_f} ||
326+ fail "Unable to write template file"
327+
328+exit 0
329
330=== added file 'checksum.sh'
331--- checksum.sh 1970-01-01 00:00:00 +0000
332+++ checksum.sh 2018-05-31 04:33:07 +0000
333@@ -0,0 +1,27 @@
334+# Override and set some home variables
335+export HOME="${USE_HOME:-/srv/builder}"
336+export CDIMAGE_BIN="${HOME}/cdimage/bin"
337+export CDIMAGE_ROOT="${HOME}/cdimage"
338+export PATH="${CDIMAGE_BIN}:${PATH}"
339+
340+SUITE_DIR="${BASE_D}/${SUITE}"
341+[ -n "${STREAM}" -a "${STREAM}" != "daily" ] &&
342+ SUITE_DIR="${BASE_D}/${STREAM}/${SUITE}"
343+SERIAL_DIR="${SUITE_DIR}/${SERIAL}"
344+
345+echo "Checksumming the new version..."
346+checksum-directory "${SERIAL_DIR}"
347+
348+if [ ! -d ${SERIAL_DIR}/unpacked ]; then
349+ echo "Adding build info to the new version..."
350+ mkdir -p ${SERIAL_DIR}/unpacked
351+ cat << EOF > ${SERIAL_DIR}/unpacked/build-info.txt
352+SERIAL=$SERIAL
353+EOF
354+fi
355+
356+checksum-directory ${SERIAL_DIR}/unpacked
357+
358+if [ "${UPDATE_CURRENT:-false}" = "true" ]; then
359+ ./update_release_directory.sh "${SUITE_DIR}"
360+fi
361
362=== added directory 'config'
363=== added file 'config/cloud-azure.cfg'
364--- config/cloud-azure.cfg 1970-01-01 00:00:00 +0000
365+++ config/cloud-azure.cfg 2018-05-31 04:33:07 +0000
366@@ -0,0 +1,9 @@
367+#cloud-config
368+package_upgrade: true
369+password: ubuntu
370+chpasswd: { expire: False }
371+ssh_pwauth: True
372+packages:
373+- pastebinit
374+- zerofree
375+- ubuntu-dev-tools
376
377=== added file 'config/cloud-maas.cfg'
378--- config/cloud-maas.cfg 1970-01-01 00:00:00 +0000
379+++ config/cloud-maas.cfg 2018-05-31 04:33:07 +0000
380@@ -0,0 +1,11 @@
381+#cloud-config
382+package_upgrade: true
383+password: ubuntu
384+packages:
385+- bzr
386+- kpartx
387+- qemu-kvm
388+- qemu-kvm-extras
389+- qemu-kvm-extras-static
390+- zerofree
391+
392
393=== added file 'config/cloud-maasv2.cfg'
394--- config/cloud-maasv2.cfg 1970-01-01 00:00:00 +0000
395+++ config/cloud-maasv2.cfg 2018-05-31 04:33:07 +0000
396@@ -0,0 +1,10 @@
397+#cloud-config
398+#This is generic enough to build for both MAAS and general cloud images
399+package_upgrade: true
400+password: ubuntu
401+packages:
402+- bzr
403+- qemu-utils
404+- zerofree
405+- gdisk
406+- proot
407
408=== added file 'config/cloud-maasv3.cfg'
409--- config/cloud-maasv3.cfg 1970-01-01 00:00:00 +0000
410+++ config/cloud-maasv3.cfg 2018-05-31 04:33:07 +0000
411@@ -0,0 +1,10 @@
412+#cloud-config
413+#This is generic enough to build for both MAAS and general cloud images
414+package_upgrade: true
415+password: ubuntu
416+packages:
417+- bzr
418+- qemu-utils
419+- zerofree
420+- gdisk
421+- proot
422
423=== added file 'config/cloud-precise.cfg'
424--- config/cloud-precise.cfg 1970-01-01 00:00:00 +0000
425+++ config/cloud-precise.cfg 2018-05-31 04:33:07 +0000
426@@ -0,0 +1,24 @@
427+#cloud-config
428+package_upgrade: true
429+password: ubuntu
430+chpasswd: { expire: False }
431+ssh_pwauth: True
432+ssh_import_id:
433+- daniel-thewatkins
434+- philroche
435+- rcj
436+packages:
437+- bzr
438+- debootstrap
439+- python-vm-builder
440+- pastebinit
441+- kpartx
442+- qemu-kvm
443+- qemu-kvm-extras
444+- qemu-kvm-extras-static
445+- debhelper
446+- virtualbox
447+- u-boot-tools
448+- zerofree
449+- gdisk
450+- ubuntu-dev-tools
451
452=== added file 'config/cloud-trusty-pp64el.cfg'
453--- config/cloud-trusty-pp64el.cfg 1970-01-01 00:00:00 +0000
454+++ config/cloud-trusty-pp64el.cfg 2018-05-31 04:33:07 +0000
455@@ -0,0 +1,13 @@
456+#cloud-config
457+packages:
458+- bzr
459+- debootstrap
460+- kpartx
461+- debhelper
462+- zerofree
463+- gdisk
464+- qemu-utils
465+- ubuntu-dev-tools
466+- gcc
467+- make
468+- zlib1g-dev
469
470=== added file 'config/cloud-trusty.cfg'
471--- config/cloud-trusty.cfg 1970-01-01 00:00:00 +0000
472+++ config/cloud-trusty.cfg 2018-05-31 04:33:07 +0000
473@@ -0,0 +1,26 @@
474+#cloud-config
475+#This is generic enough to build for both MAAS and general cloud images
476+package_upgrade: true
477+password: ubuntu
478+chpasswd: { expire: False }
479+ssh_pwauth: True
480+ssh_import_id:
481+- daniel-thewatkins
482+- philroche
483+- rcj
484+apt_sources:
485+- source: deb $MIRROR $RELEASE multiverse
486+packages:
487+- bzr
488+- debootstrap
489+- kpartx
490+- qemu-kvm
491+- qemu-user-static
492+- debhelper
493+- virtualbox
494+- zerofree
495+- gdisk
496+- proot
497+- u-boot-tools
498+- ubuntu-dev-tools
499+- zlib1g-dev
500
501=== added file 'config/cloud-vps.cfg'
502--- config/cloud-vps.cfg 1970-01-01 00:00:00 +0000
503+++ config/cloud-vps.cfg 2018-05-31 04:33:07 +0000
504@@ -0,0 +1,6 @@
505+#cloud-config
506+packages:
507+- pastebinit
508+- zerofree
509+- btrfs-tools
510+- ubuntu-dev-tools
511
512=== added file 'config/cloud.cfg'
513--- config/cloud.cfg 1970-01-01 00:00:00 +0000
514+++ config/cloud.cfg 2018-05-31 04:33:07 +0000
515@@ -0,0 +1,17 @@
516+#cloud-config
517+# Generic cloud-config for builder instance
518+package_upgrade: true
519+password: ubuntu
520+chpasswd: { expire: False }
521+ssh_pwauth: True
522+apt_sources:
523+- source: deb $MIRROR $RELEASE multiverse
524+packages:
525+- bzr
526+- zerofree
527+- gdisk
528+- gcc
529+- make
530+- git
531+- ubuntu-dev-tools
532+- zlib1g-dev
533
534=== added file 'copy_to_final.sh'
535--- copy_to_final.sh 1970-01-01 00:00:00 +0000
536+++ copy_to_final.sh 2018-05-31 04:33:07 +0000
537@@ -0,0 +1,52 @@
538+#!/bin/bash
539+#
540+# copies the files to their staging location
541+
542+DISTRO="${DISTRO:-$1}"
543+WORKSPACE="${WORKSPACE:-$2}"
544+SERIAL="${SERIAL:-$3}"
545+BTYPE="${BTYPE:-$4}"
546+BTYPE="${BTYPE:-server}"
547+
548+# Allow for legacy positional arguments
549+test_build="${5:-0}"
550+sandbox_build="${6:-0}"
551+proposed_build="${7:-0}"
552+
553+# Allow for environment variable to control this
554+TEST_BUILD="${TEST_BUILD:-$test_build}"
555+SANDBOX_BUILD="${SANDBOX_BUILD:-$sandbox_build}"
556+PROPOSED_BUILD="${PROPOSED_BUILD:-$proposed_build}"
557+
558+ROOT_D="${ROOT_D:-/srv/ec2-images}"
559+base_d="${ROOT_D}/${DISTRO}/${SERIAL}"
560+[ "${TEST_BUILD}" -eq 1 ] && base_d="${ROOT_D}/test_builds/${DISTRO}/${SERIAL}"
561+[ "${SANDBOX_BUILD}" -eq 1 ] && base_d="${ROOT_D}/sandbox/${DISTRO}/${SERIAL}"
562+[ "${PROPOSED_BUILD}" -eq 1 ] && base_d="${ROOT_D}/proposed/${DISTRO}/${SERIAL}"
563+[ "${BTYPE}" = "desktop" ] && base_d="${ROOT_D}/desktop/${DISTRO}/${SERIAL}"
564+
565+# Make sure that the HWE directory is created
566+if [[ "${BTYPE}" =~ server-hwe ]]; then
567+ base_d="${base_d}/${BTYPE//server-/}"
568+ [ ! -e "${base_d}" ] && mkdir -p "${base_d}"
569+fi
570+
571+for roottar in $(find . -iname "*root.tar.gz"); do
572+ echo "Generating file listing"
573+
574+ case ${roottar} in
575+ *amd64*) arch_name="amd64";;
576+ *i386*) arch_name="i386";;
577+ *armel*) arch_name="armel";;
578+ *armhf*) arch_name="armhf";;
579+ *ppc64*) arch_name="ppc64el";;
580+ *arm64*) arch_name="arm64";;
581+ *) arch_name="unknown-$(date +%s)";;
582+ esac
583+
584+ tar -tzvf ${roottar} >> "${WORKSPACE}/file-list-${arch_name}.log" ||
585+ echo "Non fatal error. Failed to gather file list for ${roottar}"
586+done
587+
588+cp -au ${DISTRO}-*/* ${base_d} || exit 1
589+exit 0
590
591=== added file 'create-vhd.sh'
592--- create-vhd.sh 1970-01-01 00:00:00 +0000
593+++ create-vhd.sh 2018-05-31 04:33:07 +0000
594@@ -0,0 +1,97 @@
595+#!/bin/bash
596+source "./functions/locker"
597+
598+usage() {
599+cat << EOF
600+This program is used to convert raw images to VHD files.
601+
602+ --suite: the Ubuntu Code name to build against
603+ --source_file: the name of the raw image file to convert
604+ --size: the size of the converted image in G (defaults to 30G)
605+EOF
606+exit 1
607+}
608+
609+# Defaults
610+vhd_size=30
611+
612+# Command line parsing
613+short_opts="h"
614+long_opts="suite:,source_file:,size:"
615+getopt_out=$(getopt --name "${0##*/}" --options "${short_opts}"\
616+ --long "${long_opts}" -- "$@")
617+if [ $? -eq 0 ]; then
618+ eval set -- "${getopt_out}"
619+else
620+ usage
621+ exit 1
622+fi
623+
624+while [ $# -ne 0 ]; do
625+ cur=${1}; next=${2};
626+
627+ case "${cur}" in
628+ --size) vhd_size="${2}"; shift;;
629+ --source_file) source_file="${2}"; shift;;
630+ --suite) suite="${2}"; shift;;
631+ -h|--help) usage; exit 0;;
632+ ?) usage; exit 1;;
633+ --) shift; break;;
634+ esac
635+ shift;
636+done
637+
638+if [ -z "$source_file" -o -z "$suite" ]; then
639+ echo "--source_file and --suite required."
640+ exit 1
641+fi
642+
643+raw_name=$(readlink -f "$source_file")
644+case ${suite} in
645+ precise|trusty|wily|xenial)
646+ vhd_name="${PWD}/${suite}-server-cloudimg-amd64-disk1.vhd"
647+ ;;
648+ *)
649+ vhd_name="${PWD}/${suite}-server-cloudimg-amd64.vhd"
650+ ;;
651+esac
652+
653+# Copy the raw image to make it ready for VHD production
654+cp --sparse=always "${raw_name}" "${raw_name}.pre-vhd" &&
655+ debug "Copied raw image VHD production" ||
656+ fail "Failed to copy raw image to ${raw_name}.pre-vhd"
657+
658+# Resize the copied RAW image
659+debug "Truncating image to ${vhd_size}G"
660+truncate -s "${vhd_size}G" "${raw_name}.pre-vhd" &&
661+ debug "Truncated image at ${vhd_size}G" ||
662+ fail "Failed to truncate disk image"
663+
664+# Convert to VHD first, step 1 of cheap hack
665+# This is a cheap hack...half the time the next command
666+# will fail with "VERR_INVALID_PARAMETER", so this is the,
667+# er, workaround
668+debug "Converting to VHD"
669+_vbox_cmd convertfromraw --format VHD \
670+ "${raw_name}.pre-vhd" \
671+ "${vhd_name}.pre" &&
672+ debug "Converted raw disk to VHD" ||
673+ fail "Failed to convert raw image to VHD"
674+
675+# Clone the disk to fixed, VHD for Azure
676+debug "Converting to VHD format from raw..."
677+debug ".....this might take a while...."
678+_vbox_cmd clonehd --format VHD --variant Fixed \
679+ "${vhd_name}.pre" \
680+ "${vhd_name}" &&
681+ debug "Converted raw disk to VHD format using VirtualBox" ||
682+ fail "Failed to convert raw image to VHD disk!"
683+
684+# Remove the unneeded files
685+rm "${vhd_name}.pre" "${raw_name}.pre-vhd"
686+
687+debug "Image Characteristics:"
688+_vbox_cmd showhdinfo "${vhd_name}"
689+
690+
691+debug "Raw image converted to VHD"
692
693=== added file 'ec2_publisher.sh'
694--- ec2_publisher.sh 1970-01-01 00:00:00 +0000
695+++ ec2_publisher.sh 2018-05-31 04:33:07 +0000
696@@ -0,0 +1,98 @@
697+#!/bin/bash
698+#
699+# Simple execution wrapper for publishing images to EC2 from within Jenkins
700+#
701+suite="${1}"
702+serial="${2}"
703+btype="${3}"
704+work_d="${4}"
705+test_build="${5:-0}"
706+sandbox_build="${6:-0}"
707+allow_existing="${7:-0}"
708+pub_type="daily"
709+
710+umask 022
711+ec2_pub_scripts="${EC2_PUB_LOC:-${PWD}/ec2-publishing-scripts}"
712+cronrun="/srv/builder/vmbuilder/bin/cronrun"
713+
714+# Override and set some home variables
715+export HOME="/srv/builder/vmbuilder"
716+export EC2_DAILY="${EC2_DAILY:-$HOME/ec2-daily}"
717+export CDIMAGE_BIN="${CDIMAGE_BIN:-$HOME/cdimage/bin}"
718+export CDIMAGE_ROOT="${CDIMAGE_ROOT:-$HOME/cdimage}"
719+AUTO_BUILDS="${AUTO_BUILDS:-$EC2_DAILY/automated-ec2-builds}"
720+PUBLISH_SCRIPTS="${PUBLISH_SCRIPTS:-$HOME/ec2-publishing-scripts}"
721+XC2_PATH="${EC2_DAILY}/xc2"
722+S3CMD_PATH="${S3CMD_PATH:-$EC2_DAILY/s3cmd}"
723+MISC_PATH="${MISC_PATH:-$EC2_DAILY/misc}"
724+VMBUILDER_PATH="${VMBUILDER_PATH:-$EC2_DAILY/vmbuilder}"
725+( which euca-version >> /dev/null >&1 ) || EUCA2OOLS_PATH="${EC2_DAILY}/euca2ools"
726+BOTO_PATH="${EC2_DAILY}/boto"
727+
728+export EC2_AMITOOL_HOME="${EC2_DAILY}/ec2-ami-tools"
729+export LIVE_BUILD_PATH="${EC2_DAILY}/live-build"
730+MYPATH=${VMBUILDER_PATH}:${XC2_PATH}:${S3CMD_PATH}:${PUBLISH_SCRIPTS}:${AUTO_BUILDS}:${VMBUILDER_PATH}:${EC2_AMITOOL_HOME}/bin:$HOME/bin:${CDIMAGE_BIN}
731+
732+[ -n "${EUCA2OOLS_PATH}" ] && MYPATH="${MYPATH}:${EUCA2OOLS_PATH}/bin"
733+
734+export PYTHONPATH="${BOTO_PATH}:${EUCA2OOLS_PATH}"
735+export PATH=${MYPATH}:/usr/bin:/usr/sbin:/usr/bin:/sbin:/bin
736+export JAVA_HOME=/usr
737+export START_D=${EC2_DAILY}
738+export PUBLISH_BASE=/srv/ec2-images
739+export XC2_RETRY_ON="Server.InternalError Read.timeout Server.Unavailable Unable.to.connect"
740+
741+export PATH="/srv/builder/vmbuilder/cdimage/bin:${ec2_pub_scripts}:${PATH}"
742+
743+fail() { echo "${@}" 2>&1; exit 1;}
744+
745+[ -e "${ec2_pub_scripts}" ] ||
746+ fail "Please make sure that ec2-publishing-scripts in the current path or define EC2_PUB_LOC"
747+
748+[ "$#" -eq 4 -o "$#" -eq 5 -o "$#" -eq 6 -o "$#" -eq 7 ] ||
749+ fail "Incorrect number of parameters. Must invoke with: <suite> <serial> <build type> <directory>"
750+
751+[ "${test_build}" -eq 1 ] && {
752+ echo "Build has been marked as a test build!";
753+ echo "Publishing image to sandbox location";
754+ pub_type="testing";
755+}
756+
757+[ "${sandbox_build}" -eq 1 ] && {
758+ echo "Build has been marked as a sandbox build!";
759+ echo "Publishing image to Sandbox location";
760+ pub_type="sandbox";
761+}
762+
763+echo "Checksumming result directories"
764+checksum-directory "${work_d}" &&
765+ checksum-directory "${work_d}/unpacked" ||
766+ fail "Failed to checksum result directories"
767+
768+# Drop ebs-standard and ebs-io1 from publication for xenial and after
769+if [[ "${suite}" > "xenial" || "${suite}" == "xenial" ]] ; then
770+ export OVERRIDE_ITEMS_EBS="i386:ebs-ssd amd64:ebs-ssd"
771+ export OVERRIDE_ITEMS_HVM="amd64:hvm-ssd"
772+fi
773+
774+echo "Publishing to EC2"
775+pub_args=(--verbose)
776+[ "${allow_existing}" -eq 1 ] && pub_args+=(--allow-existing)
777+${cronrun} publish-build \
778+ "${pub_args[@]}" \
779+ "${suite}" \
780+ "${btype}" \
781+ "${pub_type}" \
782+ "${work_d}" ||
783+ fail "failed publish-build ${suite} ${btype} daily ${work_d}"
784+
785+# Update current
786+base_d="${work_d%/*}"
787+serial_d="${work_d##*/}"
788+current_d="${base_d}/current"
789+[ -e "${current_d}" ] && rm "${current_d}"
790+( cd "${base_d}" && ln -s "${serial_d}" current ) ||
791+ fail "failed to update current directory"
792+
793+exit 0
794+
795
796=== added directory 'functions'
797=== added file 'functions/bzr_check.sh'
798--- functions/bzr_check.sh 1970-01-01 00:00:00 +0000
799+++ functions/bzr_check.sh 2018-05-31 04:33:07 +0000
800@@ -0,0 +1,14 @@
801+#!/bin/bash
802+
803+error() { echo "$@" 1>&2; }
804+fail() { error "$@"; exit 1; }
805+debug() { error "$(date -R):" "$@"; }
806+
807+check_branch() {
808+ [ -e "${2}" ] && rm -rf "${2}"
809+ debug "Checking out ${1} to ${2}"
810+ bzr checkout --lightweight "${1}" "${2}" &&
811+ debug "Checked out ${1}" ||
812+ fail "Failed to checkout ${1}"
813+}
814+
815
816=== added file 'functions/bzr_commit.sh'
817--- functions/bzr_commit.sh 1970-01-01 00:00:00 +0000
818+++ functions/bzr_commit.sh 2018-05-31 04:33:07 +0000
819@@ -0,0 +1,23 @@
820+#!/bin/bash
821+info_dir=${1}
822+oargs=${*//${1}/}
823+TEMP_D=""
824+error() { echo "$@" 1>&2; }
825+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
826+
827+echo "Commit comment is: ${oargs}"
828+if [ ! -d "${info_dir}/.bzr" ]; then
829+ ( cd "${info_dir}" && bzr init && bzr add --quiet . &&
830+ bzr commit --quiet -m "initial state" ) >/dev/null &&
831+ error "initialized bzr directory in ${info_dir}" ||
832+ fail "failed to initialize bzr directory in ${info_dir}"
833+fi
834+
835+bzr add "${info_dir}"
836+if bzr diff "${info_dir}" >/dev/null; then
837+ error "no changes were made to ${info_dir}"
838+else
839+ bzr commit -m "${oargs[*]}" "${info_dir}" ||
840+ fail "failed to bzr commit in ${info_dir}"
841+fi
842+
843
844=== added file 'functions/common'
845--- functions/common 1970-01-01 00:00:00 +0000
846+++ functions/common 2018-05-31 04:33:07 +0000
847@@ -0,0 +1,37 @@
848+# Common functions
849+# vi: syntax=sh expandtab ts=4
850+
851+error() { echo "$@" 1>&2; }
852+fail() { error "$@"; exit 1; }
853+debug() { echo "$(date -R): $@" 1>&2; }
854+run() { echo "$(date -R): running cmd: ${@}";
855+ env ${@} && debug "Command successful: ${@}" ||
856+ fail "failed to run cmd: ${@}"; }
857+
858+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
859+dist_le() { [[ "$1" < "$2" || "$1" == "$2" ]]; }
860+
861+map_version_to_suite() {
862+ version=(${1//-LTS/ LTS})
863+ awk '-F[, ]' \
864+ '$2 ~ /LTS/ && $1 ==V {print $5}; $2 !~ /LTS/ && $1 == V {print $4}' \
865+ V="${version[0]}" /usr/share/distro-info/ubuntu.csv
866+}
867+
868+map_suite_to_version() {
869+ suite=${1}
870+ awk '-F[, ]' \
871+ '$2 ~ /LTS/ && $5 == S {print $1"-"$2}; $2 !~ /LTS/ && $4 == S {print $1}' \
872+ S="${suite}" /usr/share/distro-info/ubuntu.csv
873+}
874+
875+# Look for common names
876+[ -z "${kvm}" -a -n "${kvm_builder}" ] && kvm="${kvm_builder}"
877+[ -z "${kvm_builder}" -a -n "${kvm}" ] && kvm_builder="${kvm}"
878+
879+[ -n "${kvm}" ] && scripts="${kvm}"
880+[ -n "${kvm_builder}" ] && scripts="${kvm_builder}"
881+
882+export kvm="${scripts}"
883+export kvm_builder="${scripts}"
884+export scripts
885
886=== added file 'functions/locker'
887--- functions/locker 1970-01-01 00:00:00 +0000
888+++ functions/locker 2018-05-31 04:33:07 +0000
889@@ -0,0 +1,49 @@
890+# This prevents concurrent commands from running.
891+_script=$(readlink -f "${BASH_SOURCE[0]:?}")
892+_my_dir=$(dirname "$_script")
893+source "${_my_dir}/common"
894+source "${_my_dir}/retry"
895+
896+cmd_lock() {
897+ LOCKFILE="/tmp/wrapper-`basename $1`"
898+ LOCKFD=99
899+
900+ _lock() { flock -$1 $LOCKFD; }
901+ _no_more_locking() { _lock u; _lock xn && rm -f $LOCKFILE; }
902+ _prepare_locking() { eval "exec $LOCKFD>\"$LOCKFILE\""; trap _no_more_locking EXIT; }
903+
904+ _prepare_locking
905+
906+ exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail
907+ exlock() { _lock x; } # obtain an exclusive lock
908+ shlock() { _lock s; } # obtain a shared lock
909+ unlock() { _lock u; } # drop a lock
910+
911+ count=0
912+ max_count=60
913+
914+ while (! exlock_now );
915+ do
916+ let wait_time=$RANDOM%30
917+ error "Waiting ${wait_time} seconds due to concurrent ${1} command"
918+ sleep ${wait_time}
919+
920+ count=$(expr ${count} + 1)
921+
922+ if [ ${count} -gt ${max_count} ]; then
923+ echo "Max wait expired. Failing."
924+ exit 1
925+ fi
926+ done
927+
928+ error "Executing command, lock is free for: ${@}"
929+ "${@}"
930+ unlock
931+}
932+
933+_vbox_cmd() {
934+ # Virtual box is a real pain. This function uses the locker function above to
935+ # wrap up vboxmanage to prevent its stupid issues with concurrency.
936+ cmd_lock vboxmanage ${@} ||
937+ fail "Failed to execute locked command: vboxmange ${@}"
938+}
939
940=== added file 'functions/merge_templates'
941--- functions/merge_templates 1970-01-01 00:00:00 +0000
942+++ functions/merge_templates 2018-05-31 04:33:07 +0000
943@@ -0,0 +1,53 @@
944+#!/bin/bash
945+# vi: ts=4 noexpandtab syntax=sh
946+#
947+# This is just like mk_template.sh, but differs in that it handles
948+# an arbitrary number of templates being merged in.
949+#
950+# ARG1 - base template
951+# ARG2 - final templates
952+# ARG* - addin templates
953+
954+# This merges templates together
955+merge_templates() {
956+ local cur_dir=${PWD}
957+ local args=(${@})
958+ local main_template=${1}; args=("${args[@]:1}")
959+ local new_template=${2}; args=("${args[@]:1}")
960+ local addins=("${args[@]}")
961+
962+ if [ "${#addins[@]}" -ge 1 ]; then
963+ ntmp_dir=$(mktemp -d template.XXXXX --tmpdir=${TMPDIR:-/tmp})
964+ cd ${ntmp_dir}
965+
966+ # Split the base template "ADDIN_HERE"
967+ awk '/ADDIN_HERE/{n++}{print >"template" n ".txt" }' \
968+ ${main_template} ||
969+ fail "failed to split template!"
970+
971+ # Combine the split template with the addin in the middle
972+ cat template.txt \
973+ ${addins[@]} \
974+ template1.txt \
975+ > ${new_template}
976+
977+ # Do some variable replacement
978+ sed -e "s,ADDIN_HERE,# END Addins,g" \
979+ -e "s,%%PPA%%,${PPA},g" \
980+ -e "s,%%PROPOSED%%,${PROPOSED:-0},g" \
981+ -i ${new_template} ||
982+ fail "Unable to finalize template!"
983+
984+ else
985+
986+ sed -e "s,ADDIN_HERE,# END Addins,g" \
987+ "${main_template}" > "${new_template}"
988+
989+ fi
990+
991+ # Remove the temp directory if it exists
992+ [ -n "${ntmp_dir}" ] && rm -rf "${ntmp_dir}"
993+
994+ # Get back to where we started
995+ cd ${cur_dir}
996+}
997
998=== added file 'functions/mk_template.sh'
999--- functions/mk_template.sh 1970-01-01 00:00:00 +0000
1000+++ functions/mk_template.sh 2018-05-31 04:33:07 +0000
1001@@ -0,0 +1,41 @@
1002+#!/bin/bash
1003+
1004+# This merges templates together
1005+merge_template() {
1006+ cur_dir=${PWD}
1007+ main_template=${1}
1008+ addin_template=${2}
1009+ new_template=${3}
1010+
1011+ if [ -n "${addin_template}" ]; then
1012+ ntmp_dir=$(mktemp -d template.XXXXX --tmpdir=${TMPDIR:-/tmp})
1013+ cd ${ntmp_dir}
1014+
1015+ # Split the base template "ADDIN_HERE"
1016+ awk '/ADDIN_HERE/{n++}{print >"template" n ".txt" }' \
1017+ ${main_template} ||
1018+ fail "failed to split template!"
1019+
1020+ # Combine the split template with the addin in the middle
1021+ cat template.txt \
1022+ ${addin_template} \
1023+ template1.txt \
1024+ > ${new_template}
1025+
1026+ # Do some variable replacement
1027+ sed -e "s,ADDIN_HERE,# END Addins,g" \
1028+ -e "s,%%PPA%%,${PPA},g" \
1029+ -e "s,%%PROPOSED%%,${PROPOSED:-0},g" \
1030+ -i ${new_template} ||
1031+ fail "Unable to finalize template!"
1032+
1033+ else
1034+ "${main_template}" "${new_template}"
1035+ fi
1036+
1037+ # Remove the temp directory if it exists
1038+ [ -n "${ntmp_dir}" ] && rm -rf "${ntmp_dir}"
1039+
1040+ # Get back to where we started
1041+ cd ${cur_dir}
1042+}
1043
1044=== added file 'functions/retry'
1045--- functions/retry 1970-01-01 00:00:00 +0000
1046+++ functions/retry 2018-05-31 04:33:07 +0000
1047@@ -0,0 +1,16 @@
1048+# Code for retrying commands
1049+
1050+retry() {
1051+ local trycount=${1} sleep=${2}
1052+ shift; shift;
1053+ local i=0 smsg=" sleeping ${sleep}: $*" ret=0
1054+ for((i=0;i<${trycount};i++)); do
1055+ "$@" && return 0
1056+ ret=$?
1057+ [ $(($i+1)) -eq ${trycount} ] && smsg=""
1058+ debug 1 "Warning: cmd failed [try $(($i+1))/${trycount}].${smsg}"
1059+ sleep $sleep
1060+ done
1061+ return $ret
1062+}
1063+
1064
1065=== added file 'generate-ubuntu-lists.sh'
1066--- generate-ubuntu-lists.sh 1970-01-01 00:00:00 +0000
1067+++ generate-ubuntu-lists.sh 2018-05-31 04:33:07 +0000
1068@@ -0,0 +1,44 @@
1069+#!/bin/bash
1070+# Generate a list of Ubuntu releases
1071+
1072+final_d="${FIANL_D:-/srv/jenkins}"
1073+tmpd=$(mktemp -d)
1074+
1075+trap "rm -rf ${tmpd}" EXIT SIGINT
1076+
1077+# Get the regular info
1078+ubuntu-distro-info --supported \
1079+ > ${tmpd}/ubuntu-supported.txt
1080+
1081+ubuntu-distro-info --all \
1082+ > ${tmpd}/ubuntu-all.txt
1083+
1084+ubuntu-distro-info --unsupported \
1085+ > ${tmpd}/ubuntu-unsupported.txt
1086+
1087+ubuntu-distro-info --release --supported \
1088+ > ${tmpd}/ubuntu-versions.txt
1089+
1090+# Populate releases which may be missing
1091+for suite in vivid:15.04 wily:15.10 xenial:16.04;
1092+do
1093+ echo "${suite%%:*}" >> ${tmpd}/ubuntu-supported.txt
1094+ echo "${suite%%:*}" >> ${tmpd}/ubuntu-all.txt
1095+ echo "${suite##*:}" >> ${tmpd}/ubuntu-versions.txt
1096+done
1097+
1098+# Sort and make it pretty
1099+cat ${tmpd}/ubuntu-supported.txt \
1100+ | sort -r -u > ${final_d}/ubuntu-supported.txt
1101+
1102+cat ${tmpd}/ubuntu-all.txt \
1103+ | egrep -v warty \
1104+ | sort -r -u > ${final_d}/ubuntu-all.txt
1105+
1106+cat ${tmpd}/ubuntu-versions.txt \
1107+ | sed "s, ,-,g" \
1108+ | sort -r -u \
1109+ > ${final_d}/ubuntu-versions.txt
1110+
1111+sort -r -u ${tmpd}/ubuntu-unsupported.txt \
1112+ > ${final_d}/ubuntu-unsupported.txt
1113
1114=== added file 'get_serial.sh'
1115--- get_serial.sh 1970-01-01 00:00:00 +0000
1116+++ get_serial.sh 2018-05-31 04:33:07 +0000
1117@@ -0,0 +1,157 @@
1118+#!/bin/bash -xe
1119+#
1120+# Determine the build serial and place files into the build serial location
1121+# Also, handle the unlikely race condition in case multiple builders arrive
1122+# At the same point.
1123+# copies the files to their staging location
1124+# Prevent race conditions for populating the aggregate build directory
1125+#
1126+# OUTPUT:
1127+# - serial.txt file in ${WORKSPACE}
1128+# - build_properties (OR ${BUILD_PROPERTIES}) file in ${PWD}
1129+# - build-info.txt in ${base_d}/unpacked (or ${base_nd}/unpacked})
1130+# NOTE: see code for how base_d and base_nd are computed
1131+
1132+# Required options to even do a build
1133+DISTRO="${DISTRO:-$1}"
1134+WORKSPACE="${WORKSPACE:-$2}" # where is the workspace
1135+BUILD_ID="${BUILD_ID:-$3}" # build id
1136+
1137+# Convert hwe builds to regular for the sake of tooling
1138+btype="${4:-server}" # server or something else
1139+if [[ "${btype}" =~ hwe ]]; then
1140+ hwe_btype="${btype}"
1141+ bytpe="server"; BTYPE="server"
1142+fi
1143+
1144+# Support the legacy broken positional stuff. This should have been
1145+# done with environmental variables or flags
1146+test_build="${5:-0}" # test build?
1147+sandbox_build="${6:-0}" # should it be a sandbox build
1148+allow_existing="${7:-1}" # allow existing
1149+publish_image="${8:-0}" # publish the image
1150+proposed_build="${9:-0}" # build from proposed
1151+
1152+# Make this less confusing by allowing someone to use environmental
1153+# variables.
1154+# TODO: utlemming: convert this --flags
1155+BTYPE="${BTYPE:-$btype}"
1156+TEST_BUILD="${TEST_BUILD:-$test_build}"
1157+SANDBOX_BUILD="${SANDBOX_BUILD:-$sandbox_build}"
1158+PUBLISH_IMAGE="${PUBLISH_IMAGE:-$publish_image}"
1159+PROPOSED_BUILD="${PROPOSED_BUILD:-$proposed_build}"
1160+
1161+ROOT_D="${ROOT_D:-/srv/ec2-images}"
1162+base_d="${ROOT_D}/${DISTRO}"
1163+[ "${TEST_BUILD}" -eq 1 ] && base_d="${ROOT_D}/test_builds/${DISTRO}"
1164+[ "${SANDBOX_BUILD}" -eq 1 ] && base_d="${ROOT_D}/sandbox/${DISTRO}" && TEST_BUILD=0
1165+[ "${BTYPE}" = "desktop" ] && base_d="${ROOT_D}/desktop/${DISTRO}"
1166+[ "${PROPOSED_BUILD}" -eq 1 ] && base_d="${ROOT_D}/proposed/${DISTRO}" &&
1167+ TEST_BUILD=0 && SANDBOX_BUILD=0
1168+
1169+let wait_time=$RANDOM%50
1170+sleep $wait_time # Make build collisions a bit harder
1171+
1172+make_hwe_meta() {
1173+ # Create a sub build-info.txt for HWE builds
1174+ serial="${1##*/}"
1175+ hwe_unpacked="${base_d}/${serial}/${hwe_btype//$BTYPE-/}/unpacked"
1176+ if [ -n "${hwe_btype}" ]; then
1177+ [ -d "${hwe_unpacked}" ] || mkdir -p "${hwe_unpacked}"
1178+ cat << EOF > "${hwe_unpacked}/build-info.txt"
1179+serial=${serial}
1180+orig_prefix=${DISTRO}-${hwe_btype}-cloudimg
1181+suite=${DISTRO}
1182+build_name=${hwe_btype}
1183+EOF
1184+ fi
1185+}
1186+
1187+make_meta() {
1188+ # Write the property file for publishing. This used
1189+ # to write trigger the EC2 publishing job
1190+ serial=${1##*/}
1191+ cat << EOM > "${BUILD_PROPERTIES:-$WORKSPACE/build_properties}"
1192+BUILD_TYPE=${BTYPE}
1193+SERIAL=${serial}
1194+SUITE=${DISTRO}
1195+TEST_BUILD=${TEST_BUILD}
1196+SANDBOX_BUILD=${SANDBOX_BUILD}
1197+PUBLISH_IMAGE=${PUBLISH_IMAGE}
1198+ALLOW_EXISTING=${ALLOW_EXISTING}
1199+PROPOSED_BUILD=${PROPOSED_BUILD}
1200+EOM
1201+
1202+ # Write the build-info.txt file. This is used in
1203+ # the publishing process
1204+ [ -d "${1}/unpacked" ] || mkdir -p "${1}/unpacked"
1205+ cat << EOF > "${1}/unpacked/build-info.txt"
1206+serial=${serial}
1207+orig_prefix=${DISTRO}-${BTYPE}-cloudimg
1208+suite=${DISTRO}
1209+build_name=${BTYPE}
1210+EOF
1211+ make_hwe_meta ${serial}
1212+ exit 0
1213+}
1214+
1215+$(stat /tmp/${DISTRO}-${BUILD_ID} > /dev/null 2>&1) && {
1216+ echo "Another builder is/has reserved this part of the build. Deferring..."
1217+ while [ -z "${destdir}" ]
1218+ do
1219+ sleep 5
1220+ finaldir=""
1221+
1222+ [ -e "${WORKSPACE}/serial.txt" ] && {
1223+ read serial < "${WORKSPACE}/serial.txt"
1224+ destdir="${base_d}/${serial}"
1225+ }
1226+
1227+ while read destdir
1228+ do
1229+ echo "Candidate serial found: ${destdir##*/}"
1230+ finaldir="${destdir}"
1231+ done < /tmp/${DISTRO}-${BUILD_ID}
1232+
1233+ if [ -n "${finaldir}" ]; then
1234+ echo "Aggregation directory reported as ${finaldir}"
1235+ echo "${finaldir##*/}" > "${WORKSPACE}/serial.txt"
1236+ make_hwe_meta "${finaldir##*/}"
1237+ exit 0
1238+ else
1239+ echo "destdir is not defined!" && exit 10
1240+ fi
1241+
1242+ done
1243+}
1244+
1245+# if we get here, then know that the build dir hasn't been created yet
1246+touch /tmp/${DISTRO}-$BUILD_ID
1247+test_base_d="${base_d}/$(date +%Y%m%d)"
1248+
1249+make_and_write() {
1250+ serial="${1##*/}"
1251+ echo "Creating aggregation directory ${1}"
1252+ echo "${serial}" > "${WORKSPACE}/serial.txt"
1253+ mkdir -p "${1}" &&
1254+ echo "${1}" >> /tmp/${DISTRO}-$BUILD_ID ||
1255+ exit 10
1256+
1257+ # Copy stuff to where it should go
1258+ make_meta "${1}"
1259+}
1260+
1261+if [ ! -d "${test_base_d}" ]; then
1262+ make_and_write "${test_base_d}"
1263+else
1264+ for bs in {1..30}
1265+ do
1266+ base_nd="${test_base_d}.${bs}"
1267+ serial="${base_nd##*/}"
1268+ echo "Checking on directory ${base_nd}"
1269+ [ ! -d "${base_nd}" ] && make_and_write "${base_nd}"
1270+ make_hwe_meta "${serial}"
1271+ done
1272+fi
1273+
1274+exit 0
1275
1276=== added directory 'jenkins'
1277=== added file 'jenkins/CloudImages_Azure.sh'
1278--- jenkins/CloudImages_Azure.sh 1970-01-01 00:00:00 +0000
1279+++ jenkins/CloudImages_Azure.sh 2018-05-31 04:33:07 +0000
1280@@ -0,0 +1,162 @@
1281+#!/bin/bash
1282+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
1283+
1284+# set default umask
1285+umask 022
1286+
1287+# Pre-setup: Read the build properties from the previous build
1288+# and discard what we don't want
1289+[ -e build.info ] && cp build.info build_properties
1290+source build_properties
1291+
1292+
1293+# Load up some libraries
1294+my_dir="$( cd "$( dirname "$0" )" && pwd )"
1295+base_dir=$(dirname ${my_dir})
1296+source "${base_dir}/functions/locker"
1297+source "${base_dir}/functions/common"
1298+source "${base_dir}/functions/retry"
1299+
1300+debug() { echo "${@}"; }
1301+fail() { echo "${@}" 2>&1; exit 1;}
1302+
1303+
1304+# Shuffle stuff around"
1305+[ -e build_properties ] && mv build_properties parent_build_properties
1306+[ -e build.log ] && mv build.log parent_build.log
1307+[ -e "${SUITE}-build.sh" ] && rm "${SUITE}-build.sh"
1308+
1309+echo "-------------------"
1310+echo " Distro: ${SUITE}"
1311+echo " Serial: ${SERIAL}"
1312+echo " Type: ${BUILD_TYPE}"
1313+echo "-------------------"
1314+
1315+set -x
1316+# Variables
1317+case ${SUITE} in
1318+ precise|trusty|wily|xenial)
1319+ disk_name="${SUITE}-server-cloudimg-amd64-disk1.img"
1320+ raw_name="${PWD}/${SUITE}-server-cloudimg-amd64-disk1.raw"
1321+ vhd_name="${PWD}/${SUITE}-server-cloudimg-amd64-disk1.vhd"
1322+ ;;
1323+ *)
1324+ disk_name="${SUITE}-server-cloudimg-amd64.img"
1325+ raw_name="${PWD}/${SUITE}-server-cloudimg-amd64.raw"
1326+ vhd_name="${PWD}/${SUITE}-server-cloudimg-amd64.vhd"
1327+ ;;
1328+esac
1329+disk_root="${DISK_ROOT:-/srv/ec2-images/${SUITE}/${SERIAL}}"
1330+raw_disk="${PWD}/results.raw"
1331+launch_config="${PWD}/launch_config.sh"
1332+register_config="${PWD}/register_config.sh"
1333+pkg_tar="${PWD}/pkg.tar"
1334+pkg_tar_d="${PKG_TAR_D:-${kvm_builder}/azure_pkgs}"
1335+proposed="${PROPOSED:-false}"
1336+vhd_size=${VHD_SIZE:-30}
1337+
1338+# Convert the image to a raw disk to work with. The raw image is used
1339+# to populate the daily VHD in Azure.
1340+debug "Converting QCow2 to Raw Disk"
1341+qemu-img \
1342+ convert -O raw \
1343+ "${disk_root}/${disk_name}" \
1344+ "${raw_name}" &&
1345+ debug "Converted QCow2 to Raw disk for manipulation" ||
1346+ fail "Failed to convert QCow2 to Raw disk"
1347+
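+# Assemble the azure_config.sh argument list, starting from any extra
+# options passed in via the CONFIG_OPTS environment variable.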
1348+config_opts=(${CONFIG_OPTS})
1349+config_opts+=(
1350+ --version $(${kvm_builder}/ubuntu-adj2version ${SUITE})
1351+ --serial "${SERIAL}"
1352+ --out "${launch_config}"
1353+ )
1354+
1355+# Turns on building from proposed
1356+[ "${proposed}" == "true" ] &&
1357+ config_opts+=(--proposed)
1358+
1359+# Setup the configuration
1360+${kvm_builder}/azure_config.sh \
1361+ ${config_opts[@]} ||
1362+ fail "Failed to configure instance runtime"
1363+
1364+# Full disk populate for 12.04
1365+root_size=2
1366+if [ "${SUITE}" == "precise" ]; then
1367+ root_size=29
1368+ truncate -s 29G "${raw_name}.pre-vhd" &&
1369+ debug "Resized 12.04 image to full size" ||
1370+ fail "Failed to resize 12.04 to full size"
1371+fi
1372+
1373+case ${SUITE} in
1374+ precise|trusty|xenial)
1375+ builder_img=/srv/builder/images/precise-builder-latest.img
1376+ ;;
1377+ *)
1378+ builder_img=/srv/builder/images/artful-builder-latest.img
1379+ ;;
1380+esac
1381+
1382+# Launch KVM to do the work
1383+${kvm_builder}/launch_kvm.sh \
1384+ --id ${BUILD_ID} \
1385+ --user-data "${launch_config}" \
1386+ --cloud-config "${kvm_builder}/config/cloud-azure.cfg" \
1387+ --extra-disk "${raw_name}" \
1388+ --raw-disk "${WORKSPACE}/${SUITE}-output.raw" \
1389+ --raw-size ${root_size} \
1390+ --img-url ${builder_img} ||
1391+ fail "KVM instance failed to build image."
1392+
1393+rm "${WORKSPACE}/${SUITE}-output.raw"
1394+
1395+
1396+# Copy the raw image to make it ready for VHD production
1397+cp --sparse=always "${raw_name}" "${raw_name}.pre-vhd" &&
1398+ debug "Copied raw image VHD production" ||
1399+ fail "Failed to copy raw image to ${raw_name}.pre-vhd"
1400+
1401+# Resize the copied RAW image
1402+debug "Truncating image to ${vhd_size}G"
1403+truncate -s "${vhd_size}G" "${raw_name}.pre-vhd" &&
1404+ debug "Truncated image at ${vhd_size}G" ||
1405+ fail "Failed to truncate disk image"
1406+
1407+# Convert to VHD first, step 1 of a two-step workaround:
1408+# without this intermediate conversion, the next command
1409+# intermittently fails with "VERR_INVALID_PARAMETER".
1411+debug "Converting to VHD"
1412+_vbox_cmd convertfromraw --format VHD \
1413+ "${raw_name}.pre-vhd" \
1414+ "${vhd_name}.pre" &&
1415+ debug "Converted raw disk to VHD" ||
1416+ fail "Failed to convert raw image to VHD"
1417+
1418+# Clone the disk to fixed, VHD for Azure
1419+debug "Converting to VHD format from raw..."
1420+debug ".....this might take a while...."
1421+_vbox_cmd clonehd --format VHD --variant Fixed \
1422+ "${vhd_name}.pre" \
1423+ "${vhd_name}" &&
1424+ debug "Converted raw disk to VHD format using VirtualBox" ||
1425+ fail "Failed to convert raw image to VHD disk!"
1426+
1427+# Remove the unneeded files
1428+rm "${vhd_name}.pre" "${raw_name}.pre-vhd"
1429+
1430+debug "Image Characteristics:"
1431+_vbox_cmd showhdinfo "${vhd_name}"
1432+
1433+
1434+debug "Raw image converted to VHD"
1435+
1436+# Optionally compress the VHD with pbzip2 (currently disabled)
1437+#debug "Archiving the VHD image"
1438+#pbzip2 -f "${vhd_name}" &&
1439+# debug "Created archive of the VHD image" ||
1440+# fail "Failed to compress image"
1441+
1442+exit 0
1443
1444=== added file 'jenkins/CloudImages_Base.sh'
1445--- jenkins/CloudImages_Base.sh 1970-01-01 00:00:00 +0000
1446+++ jenkins/CloudImages_Base.sh 2018-05-31 04:33:07 +0000
1447@@ -0,0 +1,96 @@
1448+#!/bin/bash
1449+
1450+# Set default umask
1451+umask 022
1452+
1453+DISTRO=${DISTRO:-$SUITE}
1454+DISTRO=${DISTRO:?Must define distro}
1455+build_config="${PWD}/${DISTRO}-build.sh"
1456+
1457+# Read in the common functions
1458+my_dir="$( cd "$( dirname "$0" )" && pwd )"
1459+base_dir=$(dirname ${my_dir})
1460+source "${base_dir}/functions/locker"
1461+source "${base_dir}/functions/common"
1462+source "${base_dir}/functions/retry"
1463+source "${my_dir}/build_lib.sh"
1464+select_build_config
1465+
1466+# Only block for serial if serial is unknown
1467+[ -z "${SERIAL}" ] && {
1468+ # Get the serial number
1469+ retry 3 10 \
1470+ "${base_dir}/get_serial.sh" \
1471+ "${DISTRO}" "${WORKSPACE}" "${BUILD_ID}" "${BTYPE}" 0 0 1 1 ||
1472+ fail "Failed to get serial for this build"
1473+
1474+ # Get the serial number
1475+ read SERIAL < serial.txt
1476+ [ -z "${SERIAL}" ] && echo "NO SERIAL" && exit 10
1477+}
1478+
1479+# Create the configurations
1480+cmd=("${base_dir}/builder_config.sh"
1481+ --distro "${DISTRO}"
1482+ --build-type "${BTYPE}"
1483+ --arch "${ARCH_TYPE}"
1484+ --template ${base_dir}/templates/img-build.tmpl
1485+ --serial "${SERIAL}"
1486+ --out "${build_config}"
1487+)
1488+
1489+# Allow building from proposed
1490+[ "${PROPOSED_BUILD:-0}" -eq 1 ] && cmd+=("--proposed")
1491+[ "${USE_BUILDDS:-0}" -eq 1 ] && cmd+=("--bzr-automated-builds lp:~ubuntu-on-ec2/vmbuilder/automated-ec2-builds-buildd")
1492+[ -n "${BZR_AUTOMATED_EC2}" ] && cmd+=("--bzr-automated-builds ${BZR_AUTOMATED_EC2}")
1493+[ -n "${BZR_PUBSCRIPTS}" ] && cmd+=("--bzr-pubscripts ${BZR_PUBSCRIPTS}")
1494+[ -n "${BZR_LIVEBUILD}" ] && cmd+=("--bzr-livebuild ${BZR_LIVEBUILD}")
1495+[ -n "${BZR_VMBUILDER}" ] && cmd+=("--bzr-vmbuilder ${BZR_VMBUILDER}")
1496+
1497+# Do the build
1498+${cmd[@]} || fail "Failed to configure instance configuration"
1499+unset cmd
1500+
1501+# Exit after configuring for arm if so configured
1502+if [[ "${ARCH_TYPE}" =~ (arm|aarch64|arm64) ]]; then
1503+ echo "This is an ARM build. ARM rules will apply"
1504+ [ "${BUILD_ARM}" -eq 0 ] && exit 0
1505+fi
1506+
1507+# Launch the builder
1508+# Retry building the image twice, waiting five
1509+# minutes. This should buffer most failures caused
1510+# by bad mirrors.
1511+export MAX_CYCLES=2160
1512+retry 2 300 \
1513+ "${base_dir}/launch_kvm.sh" \
1514+ --id "${BUILD_ID}" \
1515+ --user-data "${build_config}" \
1516+ --cloud-config "${base_dir}/config/${cloud_init_cfg}" \
1517+ --img-url "${BUILDER_CLOUD_IMAGE}" \
1518+ --raw-disk "${WORKSPACE}/${DISTRO}.raw" \
1519+ --raw-size 20 ||
1520+ fail "KVM instance failed"
1521+
1522+tar -xvvf "${WORKSPACE}/${DISTRO}.raw" ||
1523+ fail "Result tar failed to unpack"
1524+
1525+rm "${WORKSPACE}/${DISTRO}.raw" ||
1526+ fail "Failed to remove unnecessary file"
1527+
1528+# Put the bits in place
1529+"${base_dir}/copy_to_final.sh" \
1530+ "${DISTRO}" \
1531+ "${WORKSPACE}" \
1532+ "${SERIAL}" \
1533+ "${BTYPE}" \
1534+ "${TEST_BUILD}" \
1535+ "${SANDBOX_BUILD}" \
1536+ "${PROPOSED_BUILD}" ||
1537+ fail "Failed to place final files to destination"
1538+
1539+# Copy the build properties into the workspace. This is set by get_serial.sh
1540+[ "${BUILD_PROPERTIES}" != "${WORKSPACE}/build_properties" ] &&
1541+ cp ${BUILD_PROPERTIES} ${WORKSPACE}/build_properties
1542+
1543+echo "ARCH=${ARCH_TYPE}" >> build_properties
1544
1545=== added file 'jenkins/CloudImages_Base_Release_Delta.sh'
1546--- jenkins/CloudImages_Base_Release_Delta.sh 1970-01-01 00:00:00 +0000
1547+++ jenkins/CloudImages_Base_Release_Delta.sh 2018-05-31 04:33:07 +0000
1548@@ -0,0 +1,255 @@
1549+#!/bin/bash -x
1550+
1551+# Set default umask
1552+umask 022
1553+
1554+# Skip promotion if this file exists
1555+HOLIDAY_FILE=/srv/jenkins/HOLIDAY
1556+
1557+# Write the build properties file
1558+cat << EOF > "${WORKSPACE}/build_properties"
1559+SUITE=${SUITE}
1560+STREAM=${STREAM}
1561+SERIAL=${SERIAL}
1562+BUILD_TYPE=${BUILD_TYPE}
1563+
1564+EOF
1565+
1566+# Write the environmental variables to the run file
1567+env > ${SUITE}.run
1568+
1569+fail() { echo "$@"; exit 1;}
1570+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
1571+
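+# Architectures to diff, the cronrun helper, and the currently released tree.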
1572+arches=(i386 amd64 armel armhf arm64 ppc64el)
1573+exec_c="/srv/builder/vmbuilder/bin/cronrun"
1574+rel_base="/srv/ec2-images/releases/${SUITE}/release"
1575+rel_link=$(readlink ${rel_base})
1576+
1577+[ "${BUILD_TYPE}" = "desktop" ] &&
1578+ echo "Not valid for desktop builds" &&
1579+ exit 0
1580+
1581+# Find the existing manifest file
1582+old_manifest=$(find -L ${rel_base} -maxdepth 1 -iname '*amd64.manifest') ||
1583+ echo "Unable to find release manifest file"
1584+
1585+# Find the new manifest file
1586+new_manifest_d="/srv/ec2-images/${SUITE}/${SERIAL}"
1587+[ "${TEST_BUILD:-0}" -eq 1 ] && new_manifest_d="/srv/ec2-images/test_builds/${SUITE}/${SERIAL}"
1588+[ "${SANDBOX_BUILD:-0}" -eq 1 ] && new_manifest_d="/srv/ec2-images/sandbox/${SUITE}/${SERIAL}"
1589+new_manifest=$(find ${new_manifest_d} -maxdepth 1 -iname '*amd64.manifest') ||
1590+ fail "Unable to find new manifest file"
1591+
1592+# Find the previous serial if there was one
1593+previous_serial=$(find /srv/ec2-images/${SUITE}/ -maxdepth 1 -type d |\
1594+ awk -F\/ '{print$NF}' | sort -rn | grep "." | grep -v "${SERIAL}" | head -n1) ||
1595+ echo "Unable to find prior daily manifest"
1596+
1597+previous_manifest=${new_manifest//$SERIAL/$previous_serial}
1598+
1599+# Generate the pure package diffs
1600+for arch in "${arches[@]}"
1601+do
1602+ nm=${new_manifest//amd64/$arch}
1603+ om=${old_manifest//amd64/$arch}
1604+ pm=${previous_manifest/amd64/$arch}
1605+
1606+ [ -e "${nm}" ] &&
1607+ cp "${nm}" "${WORKSPACE}/manifest-${arch}-daily-${SERIAL}.txt"
1608+
1609+ # Generate the diff from daily to release
1610+ if [ -e "${nm}" -a -e "${om}" ]; then
1611+ release_diff=${new_manifest##*/}
1612+ release_diff=${release_diff//.manifest/-$rel_link-to-daily_manifest.diff}
1613+ release_diff=${release_diff//amd64/$arch}
1614+ diff -u ${om} ${nm} > "${WORKSPACE}/${release_diff}"
1615+ cp ${om} "${WORKSPACE}/manifest-${arch}-release.txt"
1616+ fi
1617+
1618+ # Generate the diff from daily to old daily
1619+ if [ -e "${nm}" -a -e "${pm}" ]; then
1620+ daily_diff=${new_manifest##*/}
1621+ daily_diff=${daily_diff//.manifest/-$previous_serial-to-$SERIAL-manifest.diff}
1622+ daily_diff=${daily_diff//amd64/$arch}
1623+ diff -u ${pm} ${nm} > "${WORKSPACE}/${daily_diff}"
1624+ cp ${pm} "${WORKSPACE}/manifest-${arch}-previous_daily-${previous_serial}.txt"
1625+ fi
1626+done
1627+
1628+# Determine if there is a different version of a particular package.
1629+# If so, write out the package information to the specified trigger file
1630+package_differences() {
1631+ # If a pattern is used, it should match a single entry in the manifest
1632+ package_pattern=$1
1633+ trigger_file=$2
1634+
1635+ echo "Checking for differences in ${package_pattern}"
1636+
1637+ v1="$(awk "/${package_pattern}/ {print\$NF}" ${old_manifest})"
1638+ v2="$(awk "/${package_pattern}/ {print\$NF}" ${new_manifest})"
1639+
1640+ if [ "x${v1}" != "x${v2}" ]; then
1641+ echo " Package changed old:${v1}, new:${v2}"
1642+ cat << PKGDIFF >> ${trigger_file}
1643+'${package_pattern}':
1644+- old: '${v1}'
1645+- new: '${v2}'
1646+
1647+PKGDIFF
1648+ else
1649+ echo " No difference old:${v1}, new:${v2}"
1650+ fi
1651+}
1652+
1653+# Set packages to trigger an automated promotion in this array
1654+# This list of packages is controlled through application of
1655+# lp:~cloudware/cpc-core/+git/cpc_policy:policies/0003_automated_daily_promotion.rst
1656+# trigger_set MUST NOT be modified without accompanying policy doc change
1657+declare -a trigger_set
1658+# This is an array of package names where the string is any awk-friendly
1659+# pattern supported by the expression in package_differences(), but it must
1660+# only match a single package
1661+#trigger_set=('example_package' 'example_package2-*')
1662+trigger_set=('pollinate')
1663+
1664+# Append the kernel package to the trigger_set array
1665+if dist_ge ${SUITE} quantal; then
1666+ trigger_set[${#trigger_set[@]}]='linux-image.*generic'
1667+else
1668+ trigger_set[${#trigger_set[@]}]='linux-image-virtual'
1669+fi
1670+
1671+# For legacy reasons the jenkins jobs use a "kernel" trigger file
1672+# for automated build promotion. All package changes will
1673+# use this single trigger file until the need arises for more
1674+# granularity.
1675+trigger_file="${WORKSPACE}/${SUITE}-kernel-trigger"
1676+if [ -e "${trigger_file}" ] ; then
1677+ echo "Cleaning up old trigger file in workspace"
1678+ rm --verbose "${trigger_file}"
1679+fi
1680+
1681+# Check all packages in the trigger_set array
1682+for pkg in ${trigger_set[@]} ; do
1683+ package_differences "${pkg}" "${trigger_file}"
1684+done
1685+
1686+# If the trigger file exists, determine if it should be pushed to the
1687+# build trigger directory for action
1688+if [ -e "${trigger_file}" ] ; then
1689+ if [ ! -e "${rel_base}" ]; then
1690+ echo "${SUITE} not released, not triggering"
1691+ rm --verbose "${trigger_file}"
1692+ elif [ ! -f ${HOLIDAY_FILE} ]; then
1693+ echo "Creating trigger file with contents:"
1694+ cat "${trigger_file}"
1695+ cp --verbose "${trigger_file}" \
1696+ "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger"
1697+ else
1698+ echo "Not creating trigger, ${HOLIDAY_FILE} found"
1699+ if [ -e "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger" ]; then
1700+ echo "Removing existing trigger from old build with same parent ID."
1701+ rm --verbose "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger"
1702+ fi
1703+ fi
1704+else
1705+ echo "No trigger file found"
1706+fi
1707+
1708+# Copy the diffs from the workspace into the published unpacked directory
1709+cp ${WORKSPACE}/*.diff ${new_manifest_d}/unpacked
1710+
1711+# Generate the mfdiff between the dailies
1712+[ -e "${previous_manifest}" -a -e "${new_manifest}" ] &&
1713+ ${exec_c} mfdiff amd64 ${SUITE} ${previous_manifest} ${new_manifest} >\
1714+ "${WORKSPACE}/${SUITE}-daily.changelog"
1715+
1716+# Generate the diff between daily and the released image
1717+[ -e "${old_manifest}" -a -e "${new_manifest}" ] &&
1718+ ${exec_c} mfdiff amd64 ${SUITE} ${old_manifest} ${new_manifest} >\
1719+ "${WORKSPACE}/${SUITE}-${rel_link}-to-daily.changelog"
1720+
1721+# Copy the changelogs from the workspace into the published unpacked directory
1722+cp ${WORKSPACE}/*.changelog ${new_manifest_d}/unpacked
1723+
1724+# The rest of the operations are for released images only
1725+[ ! -e "${rel_base}" ] &&
1726+ echo "No current release, aborting comparison" &&
1727+ exit 0
1728+
1729+# Tar up the deltas
1730+tar -C ${WORKSPACE} -jcvf "${WORKSPACE}/${SUITE}-${SERIAL}.tar.bz2" \
1731+ *.changelog \
1732+ *.txt \
1733+ *.diff ||
1734+ fail "Failed to create tarball"
1735+
1736+# Start the email report work
1737+changed_pkgs=$(grep '=>' ${SUITE}-${rel_link}-to-daily.changelog | \
1738+ sed -e 's,====,,g' -e 's,^, *,g' | sort -k2)
1739+
1740+# Generate the email template
1741+VER=$(${kvm}/ubuntu-adj2version ${SUITE})
1742+
1743+case ${VER} in
1744+ *8.04*) VER="${VER} LTS";
1745+ CODENAME="Hardy Heron";;
1746+ *10.04*) VER="${VER} LTS";
1747+ CODENAME="Lucid Lynx";;
1748+ *11.04*) CODENAME="Natty Narwhal";;
1749+ *11.10*) CODENAME="Oneiric Ocelot";;
1750+ *12.04*) VER="${VER} LTS";
1751+ CODENAME="Precise Pangolin";;
1752+ *12.10*) CODENAME="Quantal Quetzal";;
1753+ *13.04*) CODENAME="Raring Ringtail";;
1754+ *13.10*) CODENAME="Saucy Salamander";;
1755+ *14.04*) VER="${VER} LTS";
1756+ CODENAME="Trusty Tahr";;
1757+ *14.10*) CODENAME="Utopic Unicorn";;
1758+ *15.04*) CODENAME="Vivid Vervet";;
1759+ *15.10*) CODENAME="Wily Werewolf";;
1760+esac
1761+
1762+email_name="${WORKSPACE}/${SUITE}-release_announcement.email"
1763+cat << EOF > "${email_name}"
1764+SUBJECT: Refreshed Cloud Images of ${VER} (${CODENAME}) [${SERIAL}]
1765+TO: ec2ubuntu@googlegroups.com; ubuntu-cloud@lists.ubuntu.com; ubuntu-cloud-announce@lists.ubuntu.com
1766+
1767+A new release of the Ubuntu Cloud Images for stable Ubuntu release ${VER} (${CODENAME}) is available at [1]. These new images supersede the existing images [2]. Images are available for download or immediate use on EC2 via published AMI ids. Users who wish to update their existing installations can do so with:
1768+ 'sudo apt-get update && sudo apt-get dist-upgrade && sudo reboot'.
1769+
1770+EOF
1771+
1772+if [ "${old_linux_kernel}" != "${new_linux_kernel}" ]; then
1773+ cat << EOF >> "${email_name}"
1774+The Linux kernel was updated from ${old_linux_kernel} [3] to ${new_linux_kernel} [4]
1775+
1776+EOF
1777+fi
1778+
1779+cat << EOF >> "${email_name}"
1780+The following packages have been updated. Please see the full changelogs
1781+for a complete listing of changes:
1782+${changed_pkgs}
1783+
1784+
1785+The following is a complete changelog for this image.
1786+$(cat ${SUITE}-${rel_link}-to-daily.changelog)
1787+
1788+--
1789+[1] http://cloud-images.ubuntu.com/releases/${SUITE}/release-${SERIAL}/
1790+[2] http://cloud-images.ubuntu.com/releases/${SUITE}/${rel_link}/
1791+EOF
1792+
1793+if [ "${old_linux_kernel}" != "${new_linux_kernel}" ]; then
1794+cat << EOF >> "${email_name}"
1795+[3] http://changelogs.ubuntu.com/changelogs/pool/main/l/linux/linux_${old_linux_kernel}/changelog
1796+[4] http://changelogs.ubuntu.com/changelogs/pool/main/l/linux/linux_${new_linux_kernel}/changelog
1797+EOF
1798+fi
1799+
1800+# Create release notes
1801+lnc=$(wc -l ${email_name} | awk '{print$1}')
1802+tail -n $((lnc - 3)) ${email_name} > "${WORKSPACE}/release_notes.txt"
1803+cp ${WORKSPACE}/release_notes.txt ${new_manifest_d}/unpacked
1804
1805=== added file 'jenkins/CloudImages_Juju.sh'
1806--- jenkins/CloudImages_Juju.sh 1970-01-01 00:00:00 +0000
1807+++ jenkins/CloudImages_Juju.sh 2018-05-31 04:33:07 +0000
1808@@ -0,0 +1,253 @@
1809+#!/bin/bash
1810+
1811+# Set default umask
1812+umask 022
1813+
1814+# Read in the common files
1815+my_name=$(readlink -f ${0})
1816+my_dir=$(dirname ${my_name})
1817+my_pdir=$(dirname ${my_dir})
1818+
1819+# Source in the common functions
1820+source "${my_pdir}/functions/common"
1821+source "${my_pdir}/functions/retry"
1822+source "${my_pdir}/functions/locker"
1823+export HOME=${WORKSPACE}
1824+
1825+# needed for building on Jenkins
1826+[ -e "build_properties" ] && source build_properties
1827+
1828+# Copy the target disk images
1829+ARCH_TYPE=${ARCH_TYPE:-$ARCH}
1830+disk_orig="${SUITE}-server-cloudimg-${ARCH_TYPE}-disk1.img"
1831+disk_cp="${disk_orig//$ARCH_TYPE/$ARCH_TYPE-juju-vagrant}"
1832+disk_root="${SRV_D:-/srv/ec2-images}/${SUITE}/${SERIAL:-current}"
1833+disk_working="${WORKSPACE}/${disk_cp}"
1834+final_disk="${WORKSPACE}/box-disk1.vdi"
1835+final_location="${OUTPUT_D:-/srv/ec2-images}/vagrant/${SUITE}/${SERIAL}"
1836+box_name="${disk_working//.img/.box}"
1837+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
1838+build_host_suite=$(lsb_release -c -s)
1839+
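+# jenkins_build: copy the pristine cloud image into the workspace and run the
+# juju customization inside a KVM builder instance.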
1840+jenkins_build() {
1841+ [ -e "build_properties" ] &&
1842+ source build_properties ||
1843+ fail "Failed to read build_properties. I don't know what I'm doing!"
1844+
1845+ # Bail if something isn't right
1846+ SUITE=${SUITE:?Suite must be defined}
1847+ SERIAL=${SERIAL:?Serial must be defined}
1848+
1849+ cp "${disk_root}/${disk_orig}" "${disk_working}" ||
1850+ fail "Unable to copy ${disk_orig} from ${disk_root}"
1851+
1852+ qemu-img resize ${disk_working} 40G
1853+
1854+ # Launch KVM to do the work
1855+ ${my_pdir}/launch_kvm.sh \
1856+ --id "${ARCH_TYPE}-${BUILD_ID}" \
1857+ --user-data "${my_pdir}/config/cloud-vps.cfg" \
1858+ --cloud-config "${my_pdir}/templates/img-juju.tmpl" \
1859+ --extra-disk "${disk_working}" \
1860+ --disk-gb 1 \
1861+ --raw-disk "${raw_f}" \
1862+ --raw-size 1 \
1863+ --img-url /srv/builder/images/precise-builder-latest.img ||
1864+ fail "KVM instance failed to build image."
1865+}
1866+
1867+# Assume that we're building in Jenkins unless otherwise stated
1868+# What this allows us to do is to use the standalone builder for testing
1869+# and finish running the bits below
1870+[ "${LOCAL_BUILD:-0}" -eq 1 ] || jenkins_build
1871+
1872+# Convert to raw, then to VDI for VirtualBox.
1873+qemu-img convert -O raw ${disk_working} ${disk_working//.img/.raw}
1874+
1875+_vbox_cmd convertfromraw \
1876+ --format vdi \
1877+ ${disk_working//.img/.raw} ${final_disk}
1878+
1879+# Create the VM
1880+vmname="ubuntu-cloudimg-${SUITE}-juju-vagrant-${ARCH_TYPE}"
1881+_vbox_cmd modifyhd --compact ${final_disk}
1882+
1883+dist_v="Ubuntu"
1884+[ "${ARCH_TYPE}" = "amd64" ] && dist_v="Ubuntu_64"
1885+_vbox_cmd createvm \
1886+ --name ${vmname} \
1887+ --ostype ${dist_v} \
1888+ --register
1889+
1890+_vbox_cmd modifyvm ${vmname} \
1891+ --memory 2048 \
1892+ --boot1 disk \
1893+ --boot2 none \
1894+ --boot3 none \
1895+ --boot4 none \
1896+ --vram 12 \
1897+ --pae off \
1898+ --acpi on \
1899+ --ioapic on \
1900+ --rtcuseutc on \
1901+ --bioslogodisplaytime 0 \
1902+ --nic1 nat \
1903+ --nictype1 virtio
1904+
1905+if [ "${ARCH_TYPE}" = "i386" ]; then
1906+ _vbox_cmd modifyvm ${vmname} \
1907+ --ioapic off \
1908+ --pae on
1909+fi
1910+
1911+
1912+_vbox_cmd modifyvm ${vmname} --natpf1 "guestssh,tcp,,2222,,22"
1913+
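+# Build the storage-controller command; the port-count option name depends on
+# the VirtualBox version shipped with the build host (see below).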
1914+storage_cmd=(
1915+ _vbox_cmd storagectl "${vmname}"
1916+ --name "SATAController"
1917+ --add sata
1918+ --controller IntelAhci
1919+ --hostiocache on
1920+ )
1921+
1922+if [ "$(lsb_release -r -s | sed 's/\.//')" -lt 1404 ]; then
1923+ storage_cmd+=(--sataportcount 1)
1924+else
1925+ storage_cmd+=(--portcount 1)
1926+fi
1927+
1928+${storage_cmd[@]}
1929+
1930+_vbox_cmd storageattach ${vmname} \
1931+ --storagectl "SATAController" \
1932+ --port 0 \
1933+ --device 0 \
1934+ --type hdd \
1935+ --medium ${final_disk}
1936+
1937+# Set extra-data
1938+_vbox_cmd setextradata ${vmname} installdate ${SERIAL}
1939+_vbox_cmd setextradata ${vmname} supported false
1940+
1941+# Set the Guest information to get rid of error message
1942+[ -e vagrant_image.pkgs ] && {
1943+
1944+ vbox_version=""
1945+ while read -r line
1946+ do
1947+ line=( $(echo ${line}) )
1948+ [[ ${line[0]} =~ virtualbox-guest-utils ]] && vbox_version=${line[1]}
1949+ done < vagrant_image.pkgs
1950+ debug "Guest Additions version is ${vbox_version}"
1951+
1952+ # Set the revision to some arbitrary value
1953+ _vbox_cmd guestproperty set ${vmname} \
1954+ "/VirtualBox/GuestAdd/Revision" '8000'
1955+
1956+ # Set the Ubuntu packaged version correctly
1957+ _vbox_cmd guestproperty set ${vmname} \
1958+ "/VirtualBox/GuestAdd/VersionExt" \
1959+ "${vbox_version//-dfsg-*/_Ubuntu}"
1960+
1961+ # Set the version string appropriately
1962+ _vbox_cmd guestproperty set ${vmname} \
1963+ "/VirtualBox/GuestAdd/Version" \
1964+ "${vbox_version//-dfsg-*/}"
1965+}
1966+
1967+mkdir ${WORKSPACE}/box
1968+_vbox_cmd export ${vmname} --output ${WORKSPACE}/box/box.ovf
1969+
1970+# Create the Vagrant file
1971+#macaddr="02:$(openssl rand -hex 5)"
1972+macaddr=$(awk '-F"' '/<Adapter slot="0" enabled="true"/ {print$6}' ${WORKSPACE}/box/box.ovf)
1973+cat << EOF > ${WORKSPACE}/box/Vagrantfile
1974+\$script = <<SCRIPT
1975+bzr branch lp:jujuredirector /tmp/jujuredir
1976+
1977+if ! grep precise /etc/lsb-release > /dev/null; then
1978+ cat << EOM > "/etc/apt/apt.conf.d/90proxy"
1979+Acquire::http::Proxy "http://10.0.3.1:8000";
1980+EOM
1981+
1982+ for series in precise trusty; do
1983+ version=\$(grep \$series /usr/share/distro-info/ubuntu.csv | cut -d, -f1 | cut -d' ' -f1)
1984+ expected_filename=/var/cache/lxc/cloud-\${series}/ubuntu-\${version}-server-cloudimg-${ARCH_TYPE}-root.tar.gz
1985+ if [ ! -e \$expected_filename ]; then
1986+ mkdir -p "/var/cache/lxc/cloud-\${series}"
1987+ curl -o "\$expected_filename" \
1988+ http://cloud-images.ubuntu.com/releases/\${series}/release/ubuntu-\${version}-server-cloudimg-${ARCH_TYPE}-root.tar.gz
1989+ fi
1990+ done
1991+
1992+ # Set up squid in the LXC template
1993+ for lxc_template in \$(ls /var/cache/lxc/cloud-*/*-root.tar.gz); do
1994+ gunzip "\$lxc_template"
1995+ unwrapped_name=\$(dirname "\$lxc_template")/\$(basename "\$lxc_template" .gz)
1996+ mkdir -p etc/apt/apt.conf.d
1997+ echo 'Acquire::http::Proxy "http://10.0.3.1:8000";' > etc/apt/apt.conf.d/90proxy
1998+ tar rf "\$unwrapped_name" etc/apt/apt.conf.d/90proxy
1999+ gzip "\$unwrapped_name"
2000+ rm -rf etc
2001+ done
2002+fi
2003+
2004+bash /tmp/jujuredir/setup-juju.sh 6079
2005+echo "export JUJU_REPOSITORY=/charms" >> /home/vagrant/.bashrc
2006+SCRIPT
2007+
2008+system 'mkdir', '-p', 'charms'
2009+
2010+Vagrant.configure("2") do |config|
2011+ # This Vagrantfile is auto-generated by 'vagrant package' to contain
2012+ # the MAC address of the box. Custom configuration should be placed in
2013+ # the actual 'Vagrantfile' in this box.
2014+
2015+ config.vm.base_mac = "${macaddr}"
2016+ config.vm.network :forwarded_port, guest: 22, host: 2122, host_ip: "127.0.0.1"
2017+ config.vm.network :forwarded_port, guest: 80, host: 6080, host_ip: "127.0.0.1"
2018+ config.vm.network :forwarded_port, guest: 6079, host: 6079, host_ip: "127.0.0.1"
2019+ config.vm.network "private_network", ip: "172.16.250.15"
2020+ config.vm.provider "virtualbox" do |vb|
2021+ vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
2022+ end
2023+ config.vm.provision "shell", inline: \$script
2024+
2025+ config.vm.synced_folder "charms/", "/charms"
2026+end
2027+
2028+# Load include vagrant file if it exists after the auto-generated
2029+# so it can override any of the settings
2030+include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
2031+load include_vagrantfile if File.exist?(include_vagrantfile)
2032+EOF
2033+
2034+# Now pack it all up....
2035+tar -C ${WORKSPACE}/box -Scvf ${box_name} box.ovf Vagrantfile box-disk1.vmdk ||
2036+ fail "Unable to create box file"
2037+
2038+# Some minor cleanup
2039+rm ${disk_working} ${disk_working//.img/.raw} || /bin/true
2040+rm -rf ${WORKSPACE}/box *.vdi
2041+[ -e "${raw_f}" ] && rm "${raw_f}"
2042+
2043+# Bail here if this is a local build
2044+[ "${LOCAL_BUILD:-0}" -eq 1 ] && exit 0
2045+
2046+# Put the box in place
2047+mkdir -p "${final_location}" ||
2048+ fail "Unable to create the vagrant image location"
2049+
2050+cp ${box_name} ${final_location} ||
2051+ fail "Failed to place vagrant image in final home"
2052+
2053+# Now Checksum it all
2054+
2055+# Override and set some home variables
2056+export HOME="/srv/builder"
2057+export CDIMAGE_BIN="${HOME}/cdimage/bin"
2058+PUBLISH_SCRIPTS=${HOME}/ec2-publishing-scripts
2059+export CDIMAGE_ROOT="${HOME}/cdimage"
2060+export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
2061+checksum-directory ${final_location}
2062
2063=== added file 'jenkins/CloudImages_Update_Builder.sh'
2064--- jenkins/CloudImages_Update_Builder.sh 1970-01-01 00:00:00 +0000
2065+++ jenkins/CloudImages_Update_Builder.sh 2018-05-31 04:33:07 +0000
2066@@ -0,0 +1,68 @@
2067+#!/bin/bash
2068+
2069+# Set default umask
2070+umask 022
2071+
2072+# Read in the common files
2073+source "${kvm}/functions/common"
2074+source "${kvm}/functions/retry"
2075+
2076+# Apply the build stuff
2077+find . -iname "*build_properties" | xargs -I FILE cp FILE .
2078+[ -e "build_properties" ] &&
2079+ source build_properties ||
2080+ fail "Failed to read build_properties. I don't know what I'm doing!"
2081+
2082+[ -e failed ] && rm failed
2083+[ -e success ] && rm success
2084+
2085+# Copy the target disk image
2086+case ${SUITE} in
2087+ trusty|xenial)
2088+ disk_orig="${SUITE}-server-cloudimg-${ARCH}-disk1.img"
2089+ builder_img=/srv/builder/images/trusty-builder-latest.img
2090+ ;;
2091+ zesty)
2092+ # Zesty needs yakkety or newer due to ext4 tool changes
2093+ disk_orig="${SUITE}-server-cloudimg-${ARCH}.img"
2094+ builder_img=/srv/builder/images/zesty-builder-latest.img
2095+ ;;
2096+ *)
2097+ disk_orig="${SUITE}-server-cloudimg-${ARCH}.img"
2098+ builder_img=/srv/builder/images/artful-builder-latest.img
2099+ ;;
2100+esac
2101+
2102+disk_cp="${disk_orig//cloudimg/cloudimg-builder-$(date +%Y%m%d)}"
2103+disk_root="/srv/ec2-images/${SUITE}/${SERIAL:-current}"
2104+disk_working="${WORKSPACE}/${disk_cp}"
2105+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
2106+
2107+cp "${disk_root}/${disk_orig}" "${disk_working}" ||
2108+ fail "Unable to copy ${disk_orig} from ${disk_root}"
2109+
2110+qemu-img resize "${disk_working}" 5G ||
2111+ fail "unable to resize disk"
2112+
2113+# Launch KVM to do the work
2114+${kvm}/launch_kvm.sh \
2115+ --id "${ARCH}-${BUILD_ID}" \
2116+ --user-data "${kvm}/config/cloud-vps.cfg" \
2117+ --cloud-config "${kvm}/templates/img-update.tmpl" \
2118+ --extra-disk "${disk_working}" \
2119+ --disk-gb 5 \
2120+ --raw-disk "${raw_f}" \
2121+ --raw-size 1 \
2122+ --img-url ${builder_img} ||
2123+ fail "KVM instance failed to build image."
2124+
2125+# Remove the results
2126+rm "${raw_f}" || /bin/true
2127+
2128+# Compress it down...
2129+mv "${disk_working}" "${disk_working}.new"
2130+qemu-img convert "${disk_working}.new" -c -O qcow2 "${disk_working}" ||
2131+ fail "Failed to create compressed image"
2132+
2133+rm "${disk_working}.new"
2134+
2135
2136=== added file 'jenkins/CloudImages_Vagrant.sh'
2137--- jenkins/CloudImages_Vagrant.sh 1970-01-01 00:00:00 +0000
2138+++ jenkins/CloudImages_Vagrant.sh 2018-05-31 04:33:07 +0000
2139@@ -0,0 +1,232 @@
2140+#!/bin/bash
2141+
2142+# Set default umask
2143+umask 022
2144+
2145+# Read in the common files
2146+source "${kvm}/functions/common"
2147+source "${kvm}/functions/retry"
2148+source "${kvm}/functions/locker"
2149+export HOME=${WORKSPACE}
2150+
2151+# Apply the build stuff
2152+[ -e "build_properties" ] &&
2153+ source build_properties ||
2154+ fail "Failed to read build_properties. I don't know what I'm doing!"
2155+
2156+rm -f failed success
2157+
2158+# Copy the target disk image
2159+ARCH_TYPE=${ARCH_TYPE:-$ARCH}
2160+disk_orig="${SUITE}-server-cloudimg-${ARCH_TYPE}-disk1.img"
2161+disk_cp="${disk_orig//$ARCH_TYPE/$ARCH_TYPE-vagrant}"
2162+disk_root="${SRV_D:-/srv/ec2-images}/${SUITE}/${SERIAL:-current}"
2163+disk_working="${WORKSPACE}/${disk_cp}"
2164+final_disk="${WORKSPACE}/box-disk1.vdi"
2165+final_location="${OUTPUT_D:-/srv/ec2-images}/vagrant/${SUITE}/${SERIAL}"
2166+box_name="${disk_working//.img/.box}"
2167+raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
2168+
2169+[ -e "${final_location}/${box_name}" -a "${REBUILD}" != "true" ] && exit 0
2170+
2171+cp "${disk_root}/${disk_orig}" "${disk_working}" ||
2172+ fail "Unable to copy ${disk_orig} from ${disk_root}"
2173+
2174+# Resize it to 4G, but not the full 40G because we want it sparse
2175+qemu-img resize ${disk_working} 4G
2176+
2177+# Launch KVM to do the work
2178+${kvm}/launch_kvm.sh \
2179+ --id "${ARCH_TYPE}-${BUILD_ID}" \
2180+ --user-data "${kvm}/config/cloud-vps.cfg" \
2181+ --cloud-config "${kvm}/templates/img-vagrant.tmpl" \
2182+ --extra-disk "${disk_working}" \
2183+ --disk-gb 1 \
2184+ --raw-disk "${raw_f}" \
2185+ --raw-size 1 \
2186+ --img-url /srv/builder/images/precise-builder-latest.img ||
2187+ fail "KVM instance failed to build image."
2188+
2189+# Convert to raw, then to VDI for VirtualBox.
2190+qemu-img convert -O raw ${disk_working} ${disk_working//.img/.raw}
2191+truncate -s 40G ${disk_working//.img/.raw}
2192+
2193+_vbox_cmd convertfromraw \
2194+ --format vdi \
2195+ ${disk_working//.img/.raw} ${final_disk}
2196+
2197+# Create the VM
2198+vmname="ubuntu-cloudimg-${SUITE}-vagrant-${ARCH_TYPE}"
2199+_vbox_cmd modifyhd --compact ${final_disk}
2200+
2201+dist_v="Ubuntu"
2202+[ "${ARCH_TYPE}" = "amd64" ] && dist_v="Ubuntu_64"
2203+_vbox_cmd createvm \
2204+ --name ${vmname} \
2205+ --ostype ${dist_v} \
2206+ --register
2207+
2208+_vbox_cmd modifyvm ${vmname} \
2209+ --memory 512 \
2210+ --boot1 disk \
2211+ --boot2 none \
2212+ --boot3 none \
2213+ --boot4 none \
2214+ --vram 12 \
2215+ --pae off \
2216+ --acpi on \
2217+ --ioapic on \
2218+ --rtcuseutc on
2219+# --natnet1 default \
2220+
2221+if [ "${ARCH_TYPE}" = "i386" ]; then
2222+ _vbox_cmd modifyvm ${vmname} \
2223+ --ioapic off \
2224+ --pae on
2225+fi
2226+
2227+
2228+_vbox_cmd modifyvm ${vmname} --natpf1 "guestssh,tcp,,2222,,22"
2229+
2230+_vbox_cmd storagectl "${vmname}" \
2231+ --name "SATAController" \
2232+ --add sata \
2233+ --controller IntelAhci \
2234+ --sataportcount 1 \
2235+ --hostiocache on
2236+
2237+_vbox_cmd storageattach ${vmname} \
2238+ --storagectl "SATAController" \
2239+ --port 0 \
2240+ --device 0 \
2241+ --type hdd \
2242+ --medium ${final_disk}
2243+
2244+# Set extra-data
2245+_vbox_cmd setextradata ${vmname} installdate ${SERIAL}
2246+_vbox_cmd setextradata ${vmname} supported false
2247+
2248+# Set the Guest information to get rid of error message
2249+[ -e vagrant_image.pkgs ] && {
2250+
2251+ vbox_version=""
2252+ while read -r line
2253+ do
2254+ line=( $(echo ${line}) )
2255+ [[ ${line[0]} =~ virtualbox-guest-utils ]] && vbox_version=${line[1]}
2256+ done < vagrant_image.pkgs
2257+ debug "Guest Additions version is ${vbox_version}"
2258+
2259+ # Set the revision to some arbitrary value
2260+ _vbox_cmd guestproperty set ${vmname} \
2261+ "/VirtualBox/GuestAdd/Revision" '8000'
2262+
2263+ # Set the Ubuntu packaged version correctly
2264+ _vbox_cmd guestproperty set ${vmname} \
2265+ "/VirtualBox/GuestAdd/VersionExt" \
2266+ "${vbox_version//-dfsg-*/_Ubuntu}"
2267+
2268+ # Set the version string appropriately
2269+ _vbox_cmd guestproperty set ${vmname} \
2270+ "/VirtualBox/GuestAdd/Version" \
2271+ "${vbox_version//-dfsg-*/}"
2272+}
2273+
2274+mkdir box
2275+_vbox_cmd export ${vmname} --output box/box.ovf
2276+
2277+# Create the Vagrant file
2278+#macaddr="02:$(openssl rand -hex 5)"
2279+macaddr=$(awk '-F"' '/<Adapter slot="0" enabled="true"/ {print$6}' ${WORKSPACE}/box/box.ovf)
2280+cat << EOF > ${WORKSPACE}/box/Vagrantfile
2281+Vagrant::Config.run do |config|
2282+ # This Vagrantfile is auto-generated by 'vagrant package' to contain
2283+ # the MAC address of the box. Custom configuration should be placed in
2284+ # the actual 'Vagrantfile' in this box.
2285+ config.vm.base_mac = "${macaddr}"
2286+end
2287+
2288+# Load include vagrant file if it exists after the auto-generated
2289+# so it can override any of the settings
2290+include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
2291+load include_vagrantfile if File.exist?(include_vagrantfile)
2292+EOF
2293+
2294+# Now pack it all up....
2295+tar -C ${WORKSPACE}/box -Scvf ${box_name} box.ovf Vagrantfile box-disk1.vmdk ||
2296+ fail "Unable to create box file"
2297+
2298+# Some minor cleanup
2299+rm ${disk_working} ${disk_working//.img/.raw} || /bin/true
2300+rm -rf ${WORKSPACE}/box *.vdi
2301+rm "${raw_f}" || /bin/true
2302+
2303+# Put the box in place
2304+mkdir -p "${final_location}" ||
2305+ fail "Unable to create the vagrant image location"
2306+
2307+cp ${box_name} ${final_location} ||
2308+ fail "Failed to place vagrant image in final home"
2309+
2310+# box_d is where the boxes are stored
2311+box_d="${OUTPUT_D:-/srv/ec2-images}/vagrant/${SUITE}"
2312+
2313+# Only proceed if the required boxes exist
2314+boxes=($(find ${box_d}/${SERIAL} -regextype posix-extended -regex ".*(amd64|i386)-vagrant-disk1.box"))
2315+if [ "${#boxes[@]}" -ne 2 ]; then
2316+ echo "Not updating current, required boxes are missing"
2317+ [[ ! "${boxes[@]}" =~ "amd64" ]] && echo "Missing build for amd64"
2318+ [[ ! "${boxes[@]}" =~ "i386" ]] && echo "Missing build for i386"
2319+
2320+ # We don't want to fail here.
2321+ exit 0
2322+else
2323+ echo "Updating current links; all builds are present"
2324+fi
2325+
2326+# Update the link to current
2327+current_l="${box_d}/current"
2328+[ -e "${current_l}" ] && rm "${current_l}"
2329+( cd "${box_d}" && ln -s "${SERIAL}" current )
2330+
2331+# Cleanup old builds
2332+builds=($(find ${box_d} -mindepth 1 -maxdepth 1 -type d | sort -r))
2333+build_count="${#builds[@]}"
2334+
2335+echo "------------------------"
2336+echo "Clean-up for prior builds"
2337+echo "Found ${build_count} builds for consideration"
2338+
2339+for b in ${builds[@]}
2340+do
2341+ echo " - found build ${b}"
2342+done
2343+echo ""
2344+
2345+[ "${build_count}" -gt 4 ] && {
2346+ for item in $(seq 4 ${build_count})
2347+ do
2348+ [ -e "${builds[$item]}" ] && {
2349+ echo "Removing build ${builds[$item]} for deletion"
2350+ rm -rf ${builds[$item]} ||
2351+ echo "Failed to remove build ${builds[$item]}"
2352+ }
2353+ done
2354+
2355+ for item in $(seq 0 3)
2356+ do
2357+ [ -e "${builds[$item]}" ] &&
2358+ echo "Preserving build ${builds[$item]}"
2359+ done
2360+
2361+} || echo "No builds marked for removal"
2362+
2363+
2364+# Override and set some home variables
2365+export HOME="/srv/builder"
2366+export CDIMAGE_BIN="${HOME}/cdimage/bin"
2367+PUBLISH_SCRIPTS=${HOME}/ec2-publishing-scripts
2368+export CDIMAGE_ROOT="${HOME}/cdimage"
2369+export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
2370+checksum-directory ${final_location}
2371+
2372
2373=== added file 'jenkins/MAAS_Builder.sh'
2374--- jenkins/MAAS_Builder.sh 1970-01-01 00:00:00 +0000
2375+++ jenkins/MAAS_Builder.sh 2018-05-31 04:33:07 +0000
2376@@ -0,0 +1,171 @@
2377+#!/bin/bash
2378+set -x
2379+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
2380+
2381+find . -iname "*build_properties" -exec cp {} . \; ||
2382+ echo "Unable to copy build properties, this might be v2"
2383+
2384+[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
2385+ source build_properties ||
2386+ fail "Failed to read build_properties. I don't know what I'm doing!";
2387+}
2388+
2389+# Read in the common functions
2390+my_dir="$( cd "$( dirname "$0" )" && pwd )"
2391+base_dir=$(dirname ${my_dir})
2392+export PATH="${base_dir}:${my_dir}:${PATH}"
2393+source "${base_dir}/functions/locker"
2394+source "${base_dir}/functions/common"
2395+source "${base_dir}/functions/retry"
2396+source ${my_dir}/build_lib.sh
2397+select_build_config
2398+
2399+export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
2400+out_f="${WORKSPACE}/maas-${SUITE}-${STREAM}-config.sh"
2401+raw_f="${WORKSPACE}/${SUITE}-output.raw"
2402+query_t="${WORKSPACE}/cloud-images-query.tar"
2403+base_name="${SUITE}-server-cloudimg"
2404+rel_base_name="ubuntu-$(ubuntu-adj2version ${SUITE})-${stream//-/}-server-cloudimg"
2405+
2406+export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
2407+
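+# Map the requested stream to its source build directory and final output directory.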
2408+case "${STREAM}" in
2409+ release) build_f="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}";
2410+ base_name=${rel_base_name};
2411+ out_d="/srv/maas-images/ephemeral/releases/${SUITE}/release-${SERIAL}"
2412+ ;;
2413+ daily) build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
2414+ out_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}";
2415+ ;;
2416+ alpha*|beta*) build_f="/srv/ec2-images/releases/${SUITE}/${STREAM}";
2417+ base_name=${rel_base_name};
2418+ out_d="/srv/maas-images/ephemeral/release/${SUITE}/${STREAM}";
2419+ ;;
2420+ *) fail "Unknown stream ${STREAM}.";;
2421+esac
2422+
2423+final_out_d="${out_d}"
2424+
2425+[ -e "${final_out_d}" -a "${REBUILD:-false}" = "false" ] &&
2426+ fail "Build already exists. Rebuild is set to false. Failing this build"
2427+
2428+# Tar up query for use in-image
2429+[ ! -e "${query_t}" ] && {
2430+ tar cvf ${query_t} \
2431+ ${QUERY_D:-/srv/ec2-images/query} \
2432+ ${build_f} \
2433+ --exclude "*img" --exclude "*azure*" --exclude "*html" \
2434+ --exclude "*armel*" --exclude "*root.tar.gz" \
2435+ --exclude "*floppy" ||
2436+ fail "Failed to pack up build elements for MAAS builder"; }
2437+
2438+# Generate the template file
2439+ci_cfg="${kvm_builder}/config/cloud-maas.cfg"
2440+template="${kvm_builder}/templates/img-maas.tmpl"
2441+[ "${IS_MAAS_V2:-0}" -eq 1 ] && {
2442+ template="${kvm_builder}/templates/img-maasv2.tmpl"
2443+ ci_cfg="${kvm_builder}/config/cloud-maasv2.cfg"
2444+}
2445+
2446+maas_config.sh \
2447+ --distro "${SUITE}" \
2448+ --stream "${STREAM}" \
2449+ --template "${template}" \
2450+ --base-name "${base_name}" \
2451+ --local "${build_f}" \
2452+ --serial "${SERIAL}" \
2453+ --out "${out_f}" \
2454+ --out_d "${out_d}" ||
2455+ fail "Failed to configure KVM instance for building"
2456+
2457+[ -n "${cloud_init_cfg}" ] && ci_cfg="${kvm_builder}/config/${cloud_init_cfg}"
2458+
2459+# Launch KVM to do the work
2460+launch_kvm.sh \
2461+ --id ${BUILD_ID} \
2462+ --user-data "${out_f}" \
2463+ --cloud-config "${ci_cfg}" \
2464+ --extra-disk "${query_t}" \
2465+ --disk-gb 50 \
2466+ --raw-disk "${raw_f}" \
2467+ --raw-size 20 \
2468+ --img-url ${BUILDER_CLOUD_IMAGE} ||
2469+ fail "KVM instance failed to build image."
2470+
2471+# Extract the result set
2472+tar -xvvf "${raw_f}" ||
2473+ fail "Failed to extract information from instance"
2474+
2475+# Useful for off-host builds, like ppc64el. Just make sure that any off-host
2476+# builds are done before the on-host builds.
2477+[ "${BUILD_ONLY:-0}" -eq 1 ] && exit 0
2478+
2479+# Extracted results should be here
2480+[ ! -e "${WORKSPACE}/${out_d}" ] && fail "Expected result directory is missing: ${WORKSPACE}/${out_d}"
2481+
2482+# Checksum the results (and sign 'em)
2483+export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
2484+/srv/builder/vmbuilder/bin/cronrun checksum-directory "${WORKSPACE}/${out_d}" ||
2485+ fail "Failed to create checksums and GPG signatures"
2486+
2487+set -x
2488+# Put the bits where they go...
2489+mkdir -p "${final_out_d}" &&
2490+ cp -a ${WORKSPACE}${out_d}/* "${final_out_d}" &&
2491+ echo "Copied bits to final location ${final_out_d}" ||
2492+ fail "Unable to copy build bits to final location"
2493+
2494+# Produce build-info
2495+cat << EOF > "${final_out_d}/build-info.txt"
2496+serial=${SERIAL}
2497+orig_prefix=${SUITE}-ephemeral-maas
2498+suite=${SUITE}
2499+build_name=ephemeral
2500+EOF
2501+
2502+# Clean up the dailies
2503+if [ "${STREAM}" = "daily" ]; then
2504+ base_d="${out_d%/*}"
2505+ builds=( $(find ${base_d} -maxdepth 1 -mindepth 1 -type d | sort -r) )
2506+ build_count=${#builds[@]}
2507+
2508+ # Delete all but the six most recent builds
2509+ if [ ${build_count} -gt 6 ]; then
2510+ for item in $(seq 6 ${build_count})
2511+ do
2512+ [ -e "${builds[$item]}" ] && {
2513+ rm -rf ${builds[$item]};
2514+ echo "Build ${SUITE} ${builds[$item]##*/} has been deleted";
2515+ }
2516+ done
2517+
2518+ for item in $(seq 0 5)
2519+ do
2520+ echo "Preserving ${SUITE} ${builds[$item]##*/}"
2521+ done
2522+ else
2523+ echo "No builds marked for deletion"
2524+ fi
2525+fi
2526+
2527+# Generate the Query2 tree
2528+src_tree="${WORKSPACE}/maas_src"
2529+bzr branch "${maas_branch}" "${src_tree}"
2530+${src_tree}/tree2query \
2531+ --commit-msg "Build ${BUILD_ID}" \
2532+ --namespace maas \
2533+ /srv/maas-images
2534+
2535+# Update current
2536+if [ "${STREAM}" = "daily" ]; then
2537+ cur_d="/srv/maas-images/ephemeral/daily/${SUITE}/current"
2538+ [ -e "${cur_d}" ] && rm "${cur_d}"
2539+ ln -s "${final_out_d}" "${cur_d}" ||
2540+ echo "Failed to update ${cur_d}"
2541+fi
2542+
2543+
2544+# Remove the results
2545+rm "${raw_f}" ||
2546+ fail "Failed to clean up files!"
2547+
2548
2549=== added file 'jenkins/MAAS_Promotion.sh'
2550--- jenkins/MAAS_Promotion.sh 1970-01-01 00:00:00 +0000
2551+++ jenkins/MAAS_Promotion.sh 2018-05-31 04:33:07 +0000
2552@@ -0,0 +1,31 @@
2553+#!/bin/bash
2554+
2555+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
2556+
2557+if [ "${TAG}" == "release" ]; then
2558+ TAG="release-${SERIAL}"
2559+fi
2560+
2561+src_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}"
2562+final_out_d="/srv/maas-images/ephemeral/releases/${SUITE}/${TAG}"
2563+
2564+[ -e ${src_d} ] ||
2565+ fail "Source ${src_d} does not exist"
2566+
2567+[ -e ${final_out_d} ] &&
2568+ fail "Serial has already been promoted"
2569+
2570+mkdir -p "${final_out_d}" &&
2571+ rsync -a ${src_d}/ ${final_out_d} &&
2572+ echo "Copied bits to final location ${final_out_d}" ||
2573+ fail "Unable to copy build bits to final location"
2574+
2575+# Generate the Query2 tree
2576+export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
2577+src_tree="${WORKSPACE}/maas_src"
2578+bzr branch "${maas_branch}" "${src_tree}"
2579+${src_tree}/tree2query \
2580+ --commit-msg "Build ${BUILD_ID}" \
2581+ --namespace maas \
2582+ /srv/maas-images
2583+
2584
2585=== added file 'jenkins/MAASv2_Builder.sh'
2586--- jenkins/MAASv2_Builder.sh 1970-01-01 00:00:00 +0000
2587+++ jenkins/MAASv2_Builder.sh 2018-05-31 04:33:07 +0000
2588@@ -0,0 +1,191 @@
2589+#!/bin/bash
2590+set -x
2591+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
2592+
2593+find . -iname "*build_properties" -exec cp {} . \; ||
2594+ echo "Unable to copy build properties, this might be v2"
2595+
2596+[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
2597+ source build_properties ||
2598+ fail "Failed to read build_properties. I don't know what I'm doing!";
2599+}
2600+
2601+STREAM="${STREAM:-daily}"
2602+# Read in the common functions
2603+my_dir="$( cd "$( dirname "$0" )" && pwd )"
2604+base_dir=$(dirname ${my_dir})
2605+export PATH="${base_dir}:${my_dir}:${PATH}"
2606+source "${base_dir}/functions/locker"
2607+source "${base_dir}/functions/common"
2608+source "${base_dir}/functions/retry"
2609+source ${my_dir}/build_lib.sh
2610+select_build_config
2611+
2612+export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
2613+out_f="${WORKSPACE}/maas-${SUITE}-${STREAM}-config.sh"
2614+raw_f="${WORKSPACE}/${SUITE}-output.raw"
2615+query_t="${WORKSPACE}/cloud-images-query.tar"
2616+base_name="${SUITE}-server-cloudimg"
2617+rel_base_name="ubuntu-${VERSION:-$(ubuntu-adj2version ${SUITE})}-${stream//-/}-server-cloudimg"
2618+
2619+export maas_branch_v1="http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral"
2620+export maas_branch="${MAAS_BRANCH:-$maas_branch_v1}"
2621+
2622+case "${STREAM}" in
2623+ release) build_f="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}";
2624+ base_name=${rel_base_name};
2625+ out_d="/srv/maas-images/ephemeral/releases/${SUITE}/release-${SERIAL}"
2626+ ;;
2627+ daily) build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
2628+ out_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}";
2629+ ;;
2630+ alpha*|beta*) build_f="/srv/ec2-images/releases/${SUITE}/${STREAM}";
2631+ base_name=${rel_base_name};
2632+ out_d="/srv/maas-images/ephemeral/releases/${SUITE}/${STREAM}";
2633+ ;;
2634+ *) fail "Unknown stream ${STREAM}.";;
2635+esac
2636+
2637+final_out_d="${out_d}"
2638+
2639+[ -e "${final_out_d}" -a "${REBUILD:-false}" = "false" ] &&
2640+ fail "Build already exists. Rebuild is set to false. Failing this build"
2641+
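+# Pack the bits the builder VM needs into a tarball: MAAS v2 only needs a
+# marker file, while MAAS v1 needs the query tree and the build output.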
2642+if [ ! -e "${query_t}" ]; then
2643+
2644+ if [ "${MAASv2:-0}" -eq 1 ]; then
2645+ # MAAS v2 doesn't need this information
2646+ out_d="/tmp/maas_final"
2647+ touch ${WORKSPACE}/maasv2
2648+ tar cvf ${query_t} ${WORKSPACE}/maasv2
2649+
2650+ if [ -e "${WORKSPACE}/tmp/maas-final" ]; then
2651+ tar cvf ${query_t} maas-final ||
2652+ fail "Failed to create tarball of MAAS images"
2653+ fi
2654+
2655+ else
2656+ # MAAS v1 needs this information
2657+ tar cvf ${query_t} \
2658+ ${QUERY_D:-/srv/ec2-images/query} \
2659+ ${build_f} \
2660+ --exclude "*img" --exclude "*azure*" --exclude "*html" \
2661+ --exclude "*armel*" --exclude "*root.tar.gz" \
2662+ --exclude "*floppy" ||
2663+ fail "Failed to pack up build elements for MAAS builder";
2664+ fi
2665+fi
2666+
2667+# Select the right template
2668+tmpl="${kvm_builder}/templates/img-maas.tmpl"
2669+[ "${MAASv2:-0}" -eq 1 ] && tmpl="${tmpl//maas.tmpl/maasv2.tmpl}"
2670+
2671+# Construct the right template
2672+maas_config.sh \
2673+ --distro "${SUITE}" \
2674+ --stream "${STREAM}" \
2675+ --template "${tmpl}" \
2676+ --base-name "${base_name}" \
2677+ --local "${build_f}" \
2678+ --serial "${SERIAL}" \
2679+ --out "${out_f}" \
2680+ --maas-branch "${maas_branch}" \
2681+ --out_d "${out_d}" ||
2682+ fail "Failed to configure KVM instance for building"
2683+set +x
2684+
2685+ci_cfg="${kvm_builder}/config/cloud-maasv2.cfg"
2686+[ "$(uname -m)" == "ppc64" ] && ci_cfg="${kvm_builder}/config/cloud-trusty-pp64el.cfg"
2687+
2688+# Launch KVM to do the work
2689+launch_kvm.sh \
2690+ --id ${BUILD_ID} \
2691+ --user-data "${out_f}" \
2692+ --cloud-config "${ci_cfg}" \
2693+ --extra-disk "${query_t}" \
2694+ --disk-gb 50 \
2695+ --raw-disk "${raw_f}" \
2696+ --raw-size 20 \
2697+ --mem 1G \
2698+ --img-url ${BUILDER_CLOUD_IMAGE} ||
2699+ fail "KVM instance failed to build image."
2700+
2701+# Extract the result set
2702+tar -xvvf "${raw_f}" ||
2703+ fail "Failed to extract information from instance"
2704+
2705+# Useful for off-host builds, like ppc64el. Just make sure that any off-host
2706+# builds are done before the on-host builds.
2707+
2708+[ "${BUILD_ONLY:-0}" -eq 1 ] && exit 0
2709+[ "${MAASv2:-0}" -eq 1 ] && exit 0
2710+
2711+# Extracted results should be here
2712+[ ! -e "${WORKSPACE}/${out_d}" ] && fail "Expected result directory is missing: ${WORKSPACE}/${out_d}"
2713+
2714+# Checksum the results (and sign 'em)
2715+export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
2716+/srv/builder/vmbuilder/bin/cronrun checksum-directory "${WORKSPACE}/${out_d}" ||
2717+ fail "Failed to create checksums and GPG signatures"
2718+
2719+set -x
2720+# Put the bits where they go...
2721+mkdir -p "${final_out_d}" &&
2722+ cp -a ${WORKSPACE}${out_d}/* "${final_out_d}" &&
2723+ echo "Copied bits to final location ${final_out_d}" ||
2724+ fail "Unable to copy build bits to final location"
2725+
2726+# Produce build-info
2727+cat << EOF > "${final_out_d}/build-info.txt"
2728+serial=${SERIAL}
2729+orig_prefix=${SUITE}-ephemeral-maas
2730+suite=${SUITE}
2731+build_name=ephemeral
2732+EOF
2733+
2734+# Clean up the dailies
2735+if [ "${STREAM}" = "daily" ]; then
2736+ base_d="${out_d%/*}"
2737+ builds=( $(find ${base_d} -maxdepth 1 -mindepth 1 -type d | sort -r) )
2738+ build_count=${#builds[@]}
2739+
2740+ # Delete all but the six most recent builds
2741+ if [ ${build_count} -gt 6 ]; then
2742+ for item in $(seq 6 ${build_count})
2743+ do
2744+ [ -e "${builds[$item]}" ] && {
2745+ rm -rf ${builds[$item]};
2746+ echo "Build ${SUITE} ${builds[$item]##*/} has been deleted";
2747+ }
2748+ done
2749+
2750+ for item in $(seq 0 5)
2751+ do
2752+ echo "Preserving ${SUITE} ${builds[$item]##*/}"
2753+ done
2754+ else
2755+ echo "No builds marked for deletion"
2756+ fi
2757+fi
2758+
2759+# Generate the Query2 tree
2760+src_tree="${WORKSPACE}/maas_src"
2761+bzr branch "${maas_branch_v1}" "${src_tree}"
2762+${src_tree}/tree2query \
2763+ --commit-msg "Build ${BUILD_ID}" \
2764+ --namespace maas \
2765+ /srv/maas-images
2766+
2767+# Update current
2768+if [ "${STREAM}" = "daily" ]; then
2769+ cur_d="/srv/maas-images/ephemeral/daily/${SUITE}/current"
2770+ [ -e "${cur_d}" ] && rm "${cur_d}"
2771+ ln -s "${final_out_d}" "${cur_d}" ||
2772+ echo "Failed to update ${cur_d}"
2773+fi
2774+
2775+
2776+# Remove the results
2777+rm "${raw_f}" ||
2778+ fail "Failed to clean up files!"
2779+
2780
2781=== added file 'jenkins/MAASv2_Cleaner.sh'
2782--- jenkins/MAASv2_Cleaner.sh 1970-01-01 00:00:00 +0000
2783+++ jenkins/MAASv2_Cleaner.sh 2018-05-31 04:33:07 +0000
2784@@ -0,0 +1,55 @@
2785+#!/bin/bash
2786+#
2787+# Clean up MAAS v2/v3 builds/streams
2788+#
2789+my_dir="$(dirname $0)"
2790+my_p_dir="$(dirname $my_dir)"
2791+source ${my_p_dir}/functions/common
2792+source ${my_p_dir}/functions/bzr_check.sh
2793+
2794+# Number of builds to publish in the stream
2795+MAX_BUILDS=${MAX_BUILDS:-3}
2796+# Number of days to keep files not referenced in the stream data
2797+REAP_AGE=${REAP_AGE:-2d}
2798+
2799+WORKSPACE=${WORKSPACE:-$PWD}
2800+OUTDIR=${JENKINS_HOME:?}/.config/MAASv2_Cleaner/
2801+DAILY_ROOT=/srv/maas-images/ephemeral-v2/daily/
2802+RELEASE_ROOT=/srv/maas-images/ephemeral-v2/releases/
2803+INDEX_PATH=streams/v1/index.json
2804+
2805+# Local checkouts
2806+sstreams=${WORKSPACE}/sstreams
2807+maasv2=${WORKSPACE}/maasv2
2808+check_branch ${BZR_SIMPLESTREAMS:-lp:simplestreams} ${sstreams}
2809+check_branch ${BZR_MAASv2:-lp:maas-images} ${maasv2}
2810+
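+# For each daily stream: trim the index to MAX_BUILDS, list files no longer
+# referenced by the stream, and reap those older than REAP_AGE.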
2811+for METADATA_ROOT in /srv/maas-images/ephemeral-v2/daily/ \
2812+ /srv/maas-images/ephemeral-v3/daily/; do
2813+ case $METADATA_ROOT in
2814+ *v2*)
2815+ orphan_json="${OUTDIR}/daily.json"
2816+ ;;
2817+ *v3*)
2818+ orphan_json="${OUTDIR}/daily-v3.json"
2819+ ;;
2820+ *)
2821+ echo "Unexpected METADATA_ROOT"
2822+ exit 1
2823+ ;;
2824+ esac
2825+ run PYTHONPATH=${sstreams}:${maasv2} \
2826+ ${maasv2}/bin/meph2-util clean-md \
2827+ ${MAX_BUILDS} ${METADATA_ROOT}/${INDEX_PATH}
2828+
2829+ run PYTHONPATH=${sstreams}:${maasv2} \
2830+ ${maasv2}/bin/meph2-util find-orphans \
2831+ "${orphan_json}" \
2832+ ${METADATA_ROOT} ${METADATA_ROOT}/${INDEX_PATH}
2833+
2834+ run PYTHONPATH=${sstreams}:${maasv2} \
2835+ ${maasv2}/bin/meph2-util reap-orphans \
2836+ --older ${REAP_AGE} \
2837+ "${orphan_json}" \
2838+ ${METADATA_ROOT}
2839+done
2840
2841=== added file 'jenkins/MAASv3_Builder.sh'
2842--- jenkins/MAASv3_Builder.sh 1970-01-01 00:00:00 +0000
2843+++ jenkins/MAASv3_Builder.sh 2018-05-31 04:33:07 +0000
2844@@ -0,0 +1,67 @@
2845+#!/bin/bash -x
2846+
2847+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
2848+
2849+[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
2850+ source build_properties ||
2851+ fail "Failed to read build_properties.";
2852+}
2853+
2854+# Read in the common functions
2855+my_dir="$( cd "$( dirname "$0" )" && pwd )"
2856+base_dir=$(dirname ${my_dir})
2857+export PATH="${base_dir}:${my_dir}:${PATH}"
2858+source "${base_dir}/functions/locker"
2859+source "${base_dir}/functions/common"
2860+source "${base_dir}/functions/retry"
2861+source ${my_dir}/build_lib.sh
2862+select_build_config
2863+
2864+case "${STREAM:?}" in
2865+ daily) build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
2866+ base_name="${SUITE}-server-cloudimg"
2867+ ;;
2868+ *) fail "Unknown/unsupported stream ${STREAM}.";;
2869+esac
2870+
2871+export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
2872+out_f="${WORKSPACE:?}/maas-${SUITE}-${STREAM}-config.sh"
2873+raw_f="${WORKSPACE}/${SUITE}-output.raw"
2874+
2875+export maas_branch="${MAAS_BRANCH:?}"
2876+
2877+touch ${WORKSPACE}/maasv3
2878+tar cvf "${query_t:=${WORKSPACE}/cloud-images-query.tar}" ${WORKSPACE}/maasv3
2879+
2880+# Construct the right template
2881+maas_config.sh \
2882+ --distro "${SUITE}" \
2883+ --stream "${STREAM}" \
2884+ --template "${kvm_builder}/templates/img-maasv3.tmpl" \
2885+ --base-name "${base_name}" \
2886+ --local "${build_f}" \
2887+ --serial "${SERIAL}" \
2888+ --out "${out_f}" \
2889+ --maas-branch "${maas_branch}" \
2890+ --out_d "/tmp/maas_final" ||
2891+ fail "Failed to configure KVM instance for building"
2892+
2893+ci_cfg="${kvm_builder}/config/cloud-maasv3.cfg"
2894+[ "$(uname -m)" == "ppc64" ] && ci_cfg="${kvm_builder}/config/cloud-trusty-pp64el.cfg"
2895+
2896+# Launch KVM to do the work
2897+launch_kvm.sh \
2898+ --id ${BUILD_ID} \
2899+ --user-data "${out_f}" \
2900+ --cloud-config "${ci_cfg}" \
2901+ --extra-disk "${query_t}" \
2902+ --disk-gb 50 \
2903+ --raw-disk "${raw_f}" \
2904+ --raw-size 20 \
2905+ --mem 1G \
2906+ --img-url ${BUILDER_CLOUD_IMAGE} ||
2907+ fail "KVM instance failed to build image."
2908+
2909+# Extract the result set
2910+tar -xvvf "${raw_f}" ||
2911+ fail "Failed to extract information from instance"
2912
2913=== added file 'jenkins/Promote_Daily.sh'
2914--- jenkins/Promote_Daily.sh 1970-01-01 00:00:00 +0000
2915+++ jenkins/Promote_Daily.sh 2018-05-31 04:33:07 +0000
2916@@ -0,0 +1,55 @@
2917+#!/bin/bash
2918+echo "---------------------------------------------------"
2919+echo "Instructed to Promote Daily job:
2920+echo " Suite: ${SUITE}"
2921+echo " Serial: ${SERIAL}"
2922+echo " Milestone: ${MILESTONE_LABEL}"
2923+echo " Stream: ${BTYPE}"
2924+echo " Public: ${MAKE_PUBLIC}"
2925+echo " PrePublish: ${PREPUBLISH}"
2926+echo "
2927+echo "---------------------------------------------------"
2928+
2929+cat << EOF > "${WORKSPACE}/build_properties"
2930+SUITE=${SUITE}
2931+SERIAL=${SERIAL}
2932+MILESTONE=${MILESTONE_LABEL}
2933+STREAM=${BTYPE}
2934+PUBLIC=${MAKE_PUBLIC}
2935+PREPUBLISH=${PREPUBLISH}
2936+EOF
2937+
2938+export HOME="/srv/builder/vmbuilder"
2939+
2940+cmd=(
2941+ '/srv/builder/vmbuilder/bin/cronrun'
2942+ 'promote-daily'
2943+ '--verbose'
2944+ '--allow-existing' )
2945+
2946+if [ "${PREPUBLISH}" == "true" ]; then
2947+ echo "Pre-publishing rules, will not make public"
2948+else
2949+ [ "${MAKE_PUBLIC}" == "true" ] && cmd+=('--make-public')
2950+fi
2951+
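+# Work out the daily publication path; HWE server builds live under the server tree.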
2952+case ${BTYPE} in
2953+ *server*hwe*) pub_path="/srv/ec2-images/server/${SUITE}/${SERIAL}/${BTYPE//server-/}";;
2954+ *) pub_path="/srv/ec2-images/${BTYPE}/${SUITE}/${SERIAL}"
2955+ ;;
2956+esac
2957+
2958+if [ "${REPUBLISH}" == "true" ]; then
2959+ cmd+=('--republish')
2960+ if [ "${MILESTONE_LABEL}" == "release" ]; then
2961+ pub_path="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}"
2962+ else
2963+ pub_path="/srv/ec2-images/releases/${SUITE}/${MILESTONE_LABEL}"
2964+ fi
2965+ [[ "${BTYPE}" =~ server-hwe ]] && pub_path="${pub_path}/${BTYPE//server-/}"
2966+fi
2967+
2968+cmd+=("${MILESTONE_LABEL}" ${pub_path})
2969+
2970+echo "Executing command: ${cmd[@]}"
2971+exec ${cmd[@]}
2972
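For orientation, with hypothetical parameters (SUITE=trusty, SERIAL=20180530, BTYPE=server, MILESTONE_LABEL=beta-2, MAKE_PUBLIC=true, PREPUBLISH and REPUBLISH unset) the array assembled above ends up exec'ing roughly:

    /srv/builder/vmbuilder/bin/cronrun promote-daily --verbose --allow-existing \
        --make-public beta-2 /srv/ec2-images/server/trusty/20180530

The cronrun tool itself is not part of this branch, so the flag semantics are taken on trust from the call site.
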
2973=== added file 'jenkins/Promote_MAAS_Daily.sh'
2974--- jenkins/Promote_MAAS_Daily.sh 1970-01-01 00:00:00 +0000
2975+++ jenkins/Promote_MAAS_Daily.sh 2018-05-31 04:33:07 +0000
2976@@ -0,0 +1,48 @@
2977+#!/bin/bash
2978+
2979+export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
2980+
2981+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
2982+
2983+[ -z "${SERIAL}" ] && fail "Serial must be defined"
2984+[ -z "${SUITE}" ] && fail "Suite must be defined"
2985+[ -z "${MILESTONE}" ] && fail "Milestone must be defined"
2986+
2987+cp_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}"
2988+finald="/srv/maas-images/ephemeral/releases/${SUITE}"
2989+
2990+case "${MILESTONE}" in
2991+ release) final_d="${finald}/release-${SERIAL}";;
2992+ alpha|beta) final_d="${finald}/${MILESTONE}";;
2993+esac
2994+
2995+# Sanity check
2996+[ ! -e "${cp_d}" ] && fail "Serial ${SERIAL} for ${SUITE} does not exist"
2997+[ -e "${final_d}" ] && fail "Already released ${SERIAL} for ${SUITE} as ${MILESTONE}"
2998+
2999+# Make the home directory
3000+mkdir -p "${final_d}" ||
3001+ fail "Unable to create final destination"
3002+
3003+# Put the files in final destination
3004+cp -au ${cp_d}/* "${final_d}" ||
3005+ fail "Failed to copy source files for promotion"
3006+
3007+# Generate the Query2 tree
3008+src_tree="${WORKSPACE}/maas_src"
3009+bzr branch "${maas_branch}" "${src_tree}"
3010+${src_tree}/tree2query \
3011+ --commit-msg "Build ${BUILD_ID}" \
3012+ --namespace maas \
3013+ /srv/maas-images
3014+
3015+# Update the "release" link
3016+if [ "${MILESTONE}" = "release" ]; then
3017+ cur_d="/srv/maas-images/ephemeral/releases/${SUITE}/release"
3018+ [ -e "${cur_d}" ] && rm "${cur_d}"
3019+ ln -s "${final_d}" "${cur_d}" ||
3020+ echo "Failed to update ${cur_d}"
3021+fi
3022+
3023+# Sync the stuff
3024+KEY=maas /srv/builder/vmbuilder/bin/trigger-sync
3025
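A worked example with a hypothetical serial: promoting a trusty daily to the release milestone copies the daily tree into a release-<serial> directory, regenerates the Query2 tree, and repoints the "release" symlink:

    SUITE=trusty SERIAL=20180530 MILESTONE=release ./jenkins/Promote_MAAS_Daily.sh
    # copies /srv/maas-images/ephemeral/daily/trusty/20180530
    #     to /srv/maas-images/ephemeral/releases/trusty/release-20180530
    # and points .../releases/trusty/release at that directory
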
3026=== added file 'jenkins/Publish_EC2.sh'
3027--- jenkins/Publish_EC2.sh 1970-01-01 00:00:00 +0000
3028+++ jenkins/Publish_EC2.sh 2018-05-31 04:33:07 +0000
3029@@ -0,0 +1,64 @@
3030+#!/bin/bash -x
3031+
3032+# Add in the retry stub
3033+source "${kvm}/functions/retry"
3034+source "${kvm}/functions/common"
3035+
3036+# Exit if trigger job does not want this published
3037+[ "${PUBLISH_IMAGE}" -eq 0 ] && exit 0
3038+
3039+# Set the build directories
3040+WORK_D="/srv/ec2-images/${BUILD_TYPE}/${SUITE}/${SERIAL}"
3041+[ "${TEST_BUILD}" -eq 1 ] && WORK_D="/srv/ec2-images/test_builds/${BUILD_TYPE}/${SUITE}/${SERIAL}"
3042+[ "${SANDBOX_BUILD}" -eq 1 ] && WORK_D="/srv/ec2-images/sandbox/${BUILD_TYPE}/${SUITE}/${SERIAL}"
3043+
3044+# Handle the special case of HWE builds. Otherwise they get the names of server
3045+# and things don't go well.
3046+[[ "${HWE_SUFFIX}" =~ hwe ]] &&
3047+ WORK_D="${WORK_D}/${HWE_SUFFIX}" &&
3048+ BUILD_TYPE="${BUILD_TYPE}-${HWE_SUFFIX}"
3049+
3050+
3051+echo "Using ${WORK_D} as the directory"
3052+[ -e "${WORK_D}" ] || { echo "Working directory does not exist!"; exit 1; }
3053+
3054+ec2_pub="${PWD}/ec2-publishing-scripts"
3055+
3056+# Check out the scripts needed
3057+[ -e "${ec2_pub}" ] && rm -rf "${ec2_pub}"
3058+bzr branch "${EC2_PUB_SCRIPTS}" "${ec2_pub}"
3059+
3060+# Add some elements to the path
3061+VMBUILDER_PATH="${VMBUILDER_PATH:-/srv/builder/vmbuilder}"
3062+VMBUILDER_BIN="${VMBUILDER_PATH}/bin"
3063+XC2_PATH="${VMBUILDER_PATH}/ec2-daily/xc2"
3064+export PUBLISH_SCRIPTS="${PUBLISH_SCRIPTS:-$VMBUILDER_PATH/ec2-publishing-scripts}"
3065+export PATH="${VMBUILDER_BIN}:${VMBUILDER_PATH}:${XC2_PATH}:${PATH}"
3066+export HOME="/srv/builder/vmbuilder"
3067+export CDIMAGE_ROOT="${CDIMAGE_ROOT:-/srv/builder/cdimage}"
3068+export EC2_PUB_LOC="${ec2_pub}"
3069+
3070+ec2publish() {
3071+ # Run the publisher job
3072+ ${kvm}/ec2_publisher.sh \
3073+ ${SUITE} \
3074+ ${SERIAL} \
3075+ ${BUILD_TYPE} \
3076+ ${WORK_D} \
3077+ ${TEST_BUILD} \
3078+ ${SANDBOX_BUILD} \
3079+ ${ALLOW_EXISTING}
3080+}
3081+
3082+# Retry the publishing up to 6 times
3083+retry 6 120 ec2publish ||
3084+ fail "Failed six attempts to publish EC2 images!"
3085+
3086+# Add the new daily to the tracker
3087+#exec_tracker=${ADD_TO_TRACKER:-0}
3088+#[ "${exec_tracker}" -eq 1 ] && {
3089+# ${kvm}/tracker.sh daily ${SUITE} ${SERIAL} &&
3090+# exit $? || fail "Unable to execute tracker!"
3091+# }
3092+#
3093+#exit 0
3094
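The retry helper sourced from functions/retry is not shown in this hunk; the call site above implies a "retry ATTEMPTS DELAY COMMAND..." calling convention. A minimal sketch of a helper with that interface (an assumption about its shape, not the actual implementation in functions/retry):

    retry() {
        # retry ATTEMPTS DELAY CMD [ARGS...] -- sketch only
        local attempts=$1 delay=$2 n
        shift 2
        for ((n = 1; n <= attempts; n++)); do
            "$@" && return 0
            echo "attempt ${n}/${attempts} of '$*' failed; sleeping ${delay}s" 1>&2
            sleep "${delay}"
        done
        return 1
    }
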
3095=== added file 'jenkins/Publish_Results_to_Tracker.sh'
3096--- jenkins/Publish_Results_to_Tracker.sh 1970-01-01 00:00:00 +0000
3097+++ jenkins/Publish_Results_to_Tracker.sh 2018-05-31 04:33:07 +0000
3098@@ -0,0 +1,34 @@
3099+#!/bin/bash
3100+
3101+# Environment variables:
3102+# HOST: the Jenkins host URL to poll from
3103+# SUITE: Ubuntu codename
3104+# MILESTONE: e.g. Alpha 2
3105+# SERIAL: the build serial, e.g. 20130213
3106+# OUT: script file to write and then execute
3107+
3108+set -x
3109+
3110+# Setup the QA tracker code
3111+bzr branch http://bazaar.launchpad.net/~jibel/+junk/qatracker
3112+cd qatracker
3113+sed -i "s/iso.qa.ubuntu.com/cloud.qa.ubuntu.com/g" tracker_update_result
3114+
3115+bzr branch http://bazaar.launchpad.net/~ubuntu-qa-website-devel/ubuntu-qa-website/python-qatracker
3116+ln -s python-qatracker/qatracker.py .
3117+export PATH="${PATH}:${WORKSPACE}/qatracker"
3118+
3119+# Get the actual working script
3120+${scripts}/tests/tracker.py \
3121+ --host ${HOST} \
3122+ --suite ${SUITE} \
3123+ --test ${TEST} \
3124+ --milestone "${MILESTONE}" \
3125+ --serial ${SERIAL} \
3126+ --out "${WORKSPACE}/script.sh"
3127+
3128+# Execute the script
3129+env API_USER="${API_USER}" \
3130+ API_KEY="${API_KEY}" \
3131+ bash ${WORKSPACE}/script.sh | tee publish.log ||
3132+ exit 1
3133
3134=== added file 'jenkins/README.txt'
3135--- jenkins/README.txt 1970-01-01 00:00:00 +0000
3136+++ jenkins/README.txt 2018-05-31 04:33:07 +0000
3137@@ -0,0 +1,1 @@
3138+This directory contains the jobs that Jenkins executes. Most of the jobs just set up the environment and then call another script, usually one directory below.
3139
3140=== added file 'jenkins/Test_Azure.sh'
3141--- jenkins/Test_Azure.sh 1970-01-01 00:00:00 +0000
3142+++ jenkins/Test_Azure.sh 2018-05-31 04:33:07 +0000
3143@@ -0,0 +1,17 @@
3144+#!/bin/bash
3145+fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
3146+
3147+umask 022
3148+set -x
3149+source watch_properties || fail "Failed to read watch properties"
3150+
3151+echo "-------------------"
3152+echo "Image for testing:"
3153+cat watch_properties
3154+echo "-------------------"
3155+
3156+
3157+my_dir="$( cd "$( dirname "$0" )" && pwd )"
3158+base_dir=$(dirname ${my_dir})
3159+
3160+${my_dir}/tests/azure.sh ${1}
3161
3162=== added file 'jenkins/build_lib.sh'
3163--- jenkins/build_lib.sh 1970-01-01 00:00:00 +0000
3164+++ jenkins/build_lib.sh 2018-05-31 04:33:07 +0000
3165@@ -0,0 +1,33 @@
3166+#!/bin/bash
3167+
3168+# set default umask
3169+umask 022
3170+
3171+# Read in the common functions
3172+my_dir="$( cd "$( dirname "$0" )" && pwd )"
3173+base_dir=$(dirname ${my_dir})
3174+export PATH="${base_dir}:${my_dir}:${PATH}"
3175+source "${base_dir}/functions/locker"
3176+source "${base_dir}/functions/common"
3177+source "${base_dir}/functions/retry"
3178+
3179+dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
3180+
3181+[ -z "${DISTRO}" -a -n "${SUITE}" ] && DISTRO="${SUITE}"
3182+
3183+select_build_config() {
3184+
3185+ [ -z "${BUILDER_CLOUD_IMAGE}" ] && {
3186+ # Use the latest 14.04 LTS image to do the build.
3187+ BUILDER_CLOUD_IMAGE="http://cloud-images.ubuntu.com/releases/trusty/release/ubuntu-14.04-server-cloudimg-amd64-uefi1.img"
3188+ export cloud_init_cfg="cloud-trusty.cfg"
3189+ }
3190+
3191+ # For ppc64el, we use ppc64el images
3192+ [ "${ARCH_TYPE}" == "ppc64el" ] && {
3193+ export cloud_init_cfg="cloud-trusty-pp64el.cfg"
3194+ BUILDER_CLOUD_IMAGE="${BUILDER_CLOUD_IMAGE//amd64/ppc64el}"
3195+ export BUILDER_CLOUD_IMAGE="${BUILDER_CLOUD_IMAGE//uefi1/disk1}"
3196+ }
3197+ echo "Using ${BUILDER_CLOUD_IMAGE} to do the build"
3198+}
3199
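Jenkins job scripts consume this library by sourcing it and then calling select_build_config, optionally exporting ARCH_TYPE or BUILDER_CLOUD_IMAGE first. A short usage sketch with hypothetical values:

    # inside a job script, after my_dir/base_dir have been resolved as above
    source "${my_dir}/build_lib.sh"
    export ARCH_TYPE=ppc64el        # hypothetical: build for ppc64el
    select_build_config             # picks BUILDER_CLOUD_IMAGE and cloud_init_cfg
    echo "builder image: ${BUILDER_CLOUD_IMAGE} (${cloud_init_cfg})"
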
3200=== added file 'jenkins/env-test.sh'
3201--- jenkins/env-test.sh 1970-01-01 00:00:00 +0000
3202+++ jenkins/env-test.sh 2018-05-31 04:33:07 +0000
3203@@ -0,0 +1,2 @@
3204+#!/bin/bash
3205+env
3206
3207=== added file 'launch_kvm.sh'
3208--- launch_kvm.sh 1970-01-01 00:00:00 +0000
3209+++ launch_kvm.sh 2018-05-31 04:33:07 +0000
3210@@ -0,0 +1,222 @@
3211+#!/bin/bash
3212+usage() {
3213+cat << EOF
3214+This program is a KVM wrapper for performing tasks inside a KVM Environment.
3215+Its primary goal is to help developers do dangerous tasks that their IS/IT
2216+department won't allow them to do on an existing machine.
3217+ --id <ARG> The ID you want to use to identify the KVM image
3218+ this is used to name the image
2219+ --disk-gb <ARG> Disk size you want to resize the image to
2220+ Default is to _add_ 15GB
3221+ --smp <ARG> KVM SMP options, defaults to:
3222+ ${smp_opt}
3223+ --mem <ARG> How much RAM do you want to use
3224+ --user-data <ARG> Cloud-Init user-data file
3225+ --cloud-config <ARG> Cloud-Init cloud-config file
3226+ --img-url <ARG> Location of the image file.
3227+ --raw-disk <ARG> Name of RAW disk to create and attach.
3228+ --raw-size <ARG> Size of RAW disk in GB.
3229+ --extra-disk <ARG> Add an extra disk, starting with /dev/vdd
3230+ --cloud-init-file <ARG> Additional file for the cloud-init data
3231+EOF
3232+exit 1
3233+}
3234+
3235+short_opts="h"
3236+long_opts="id:,ssh_port,disk-gb:,mem:,bzr-automated-ec2-builds:,cloud-config:,user-data:,kernel-url:,img-url:,raw-disk:,raw-size:,smp:,extra-disk:,cloud-init-file:,help"
3237+getopt_out=$(getopt --name "${0##*/}" \
3238+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
3239+ eval set -- "${getopt_out}" ||
3240+ usage
3241+
3242+builder_id=$(uuidgen)
3243+uuid=${builder_id}
3244+bname="server"
3245+size_gb=15
3246+mem=512
3247+smp_opt="4"
3248+ud=""
3249+cloud_config=""
3250+img_loc="${BUILDER_CLOUD_IMAGE:-http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img}"
3251+KVM_PID=""
3252+
3253+while [ $# -ne 0 ]; do
3254+ cur=${1}; next=${2};
3255+ case "$cur" in
3256+ --id) id="$2"; shift;;
3257+ --disk-gb) size_gb="$2"; shift;;
3258+ --mem) mem="$2"; shift;;
3259+ --cloud-config) ccloud="$2"; shift;;
3260+ --user-data) ud="$2"; shift;;
3261+ --img-url) img_loc="$2"; shift;;
3262+ --raw-disk) raw_disk="$2"; shift;;
3263+ --raw-size) raw_size="$2"; shift;;
2264+ --smp) smp_opt="$2"; shift;;
3265+ --extra-disk) [ -z "${extra_disk}" ] && extra_disk=$2 || extra_disk="${extra_disk} $2"; shift;;
3266+ --cloud-init-file) [ -z "${cloud_init_files}" ] && cloud_init_files=$2 || cloud_init_files="${cloud_init_files} $2"; shift;;
3267+ -h|--help) usage; exit 0;;
3268+ --) shift; break;;
3269+ esac
3270+ shift;
3271+done
3272+
3273+work_d="$(mktemp -d /tmp/kvm-builder.XXXX)"
3274+kvm_pidfile="$(mktemp --tmpdir=${work_d})"
3275+
3276+error() { echo "$@" 1>&2; }
3277+cleanup() {
3278+ [ -n "${KVM_PID}" ] && kill -9 ${KVM_PID};
3279+ [ -n "${TAIL_PID}" ] && kill -9 ${TAIL_PID};
3280+ rm -rf "${work_d}";
3281+}
3282+fail() { error "$@"; cleanup; exit 1; }
3283+debug() { error "$(date -R):" "$@"; }
3284+sysfail() { fail "Failure in commands detected; purging "; }
3285+
3286+# Make sure that we kill everything
3287+trap sysfail SIGINT SIGTERM
3288+
3289+[ -z "${ud}" ] && fail "Must define user-data script via --user-data"
3290+[ -z "${ccloud}" ] && fail "Must define cloud-config script via --cloud-config"
3291+
3292+debug "Creating Cloud-Init configuration..."
3293+write_mime_args=(
3294+ -o "${work_d}/user-data.txt"
3295+ "${ccloud}"
3296+ "${ud}")
3297+write_mime_args+=(${cloud_init_files[@]})
3298+write_mime_location="$(which write-mime-multipart)"
3299+if which python3 > /dev/null; then
3300+ "${write_mime_location}" ${write_mime_args[@]} || fail "Unable to create user-data"
3301+else
3302+ python "${write_mime_location}" ${write_mime_args[@]} || fail "Unable to create user-data"
3303+fi
3304+
3305+echo "instance-id: $(uuidgen)" > "${work_d}/meta-data"
3306+echo "local-hostname: builder" >> "${work_d}/meta-data"
3307+
3308+debug "Creating Seed for Cloud-Init..."
3309+"${0%/*}/make-seed.sh" "${work_d}/seed.img" "${work_d}/user-data.txt" "${work_d}/meta-data" ||
3310+ fail "Failed to create Configruation ISO"
3311+
3312+# Place the image in place
3313+debug "Build image location is ${img_loc}"
3314+if [[ "${img_loc}" =~ "http" ]]; then
3315+ debug "Fetching cloud image from ${img_loc}"
3316+ curl -s -o "${work_d}/img-${builder_id}" "${img_loc}" ||
3317+ fail "Unable to fetch pristine image from '${img_loc}'"
3318+else
3319+ cp "${img_loc}" "${work_d}/img-${builder_id}" ||
3320+ fail "Unable to copy '${img_loc}'"
3321+fi
3322+
3323+debug "Adding ${size_gb}G to image size"
3324+qemu-img resize "${work_d}/img-${builder_id}" +"${size_gb}G" ||
3325+ fail "Unable to resize image to ${size_gb}G"
3326+
3327+if [ -n "${raw_disk}" -a ! -e "${raw_disk}" ]; then
3328+ if [ -n "${raw_size}" ]; then
3329+ dd if=/dev/zero of=${raw_disk} bs=1k count=1 seek=$((${raw_size} * 1024000)) &&
3330+ debug "Create new raw disk" ||
3331+ fail "Unable to create raw disk"
3332+ else
3333+ fail "Undefined raw disk size"
3334+ fi
3335+else
3336+ debug "Using existing raw disk."
3337+fi
3338+
3339+
3340+debug "________________________________________________"
3341+debug "Launching instance..."
3342+kvm_cmd=(
3343+ ${QEMU_COMMAND:-kvm}
3344+ -name ${uuid}
3345+ -drive file=${work_d}/img-${builder_id},if=virtio,bus=0,cache=unsafe,unit=0
3346+ -drive file=${raw_disk},if=virtio,format=raw,bus=0,unit=1
3347+ -drive file=${work_d}/seed.img,if=virtio,media=cdrom,bus=0,cache=unsafe,unit=2
3348+ -net nic,model=virtio
3349+ -net user
3350+ -no-reboot
3351+ -display none
3352+ -daemonize
3353+ -serial file:${work_d}/console.log
3354+ -pidfile ${kvm_pidfile}
3355+ )
3356+kvm_cmd+=(${QEMU_ARGS[@]})
3357+
2358+# Arch-dependent settings
3359+if [[ "$(uname -p)" =~ "ppc64" ]]; then
3360+ # Use more memory for building on PPC64
3361+ kvm_cmd+=(-m 4G)
3362+else
3363+ kvm_cmd+=(-smp ${smp_opt} -m ${mem})
3364+fi
3365+
3366+# Allow for kernel and append
3367+if [ -n "${QEMU_KERNEL}" ]; then
3368+ root="/dev/vda1"
3369+ if [[ "$(uname -p)" =~ "ppc64" ]]; then
3370+ root="/dev/vda"
3371+ fi
3372+ kvm_cmd+=(-kernel ${QEMU_KERNEL}
3373+ -append "earlyprintk root=${root} console=hvc0"
3374+ )
3375+fi
3376+
3377+unit_c=3
3378+for disk in ${extra_disk}
3379+do
3380+ if [[ $(file ${disk}) =~ (disk|qcow|QCOW|vmdk|VMDK|vdi|VDI) ]]; then
3381+ debug "Adding extra disk $disk to KVM configuration"
2382+ kvm_cmd+=(-drive file=${disk},if=virtio,bus=1,unit=${unit_c})
2383+ else
2384+ debug "Adding extra disk as a raw formatted disk"
2385+ kvm_cmd+=(-drive file=${disk},if=virtio,format=raw,bus=1,unit=${unit_c})
3386+ fi
3387+ unit_c=$((unit_c+1))
3388+done
3389+
3390+debug "KVM command is: ${kvm_cmd[@]}"
3391+"${kvm_cmd[@]}" ||
3392+ fail "Failed to launch KVM image\n${kvm_out}"
3393+
3394+read KVM_PID < ${kvm_pidfile}
3395+debug "KVM PID is: ${KVM_PID}"
3396+
3397+tail -f "${work_d}/console.log" &
3398+TAIL_PID=$!
3399+
3400+# Wait on the pid until the max timeout
3401+count=0
3402+max_count=${MAX_CYCLES:-720}
2403+while ps ${KVM_PID} > /dev/null 2>&1
3404+do
3405+ sleep 10
3406+ count=$((count + 1))
3407+ if [ "${count}" -gt "${max_count}" ]; then
3408+ kill -15 ${KVM_PID}
3409+ debug "Build timed out...killing PID ${KVM_PID}"
3410+ fi
3411+done
3412+
3413+debug "________________________________________________"
3414+debug "KVM PID has ended. Work is done"
3415+kill -15 ${TAIL_PID}
3416+
3417+unset KVM_PID
3418+unset TAIL_PID
3419+
3420+[ -n "${raw_disk}" ] &&
3421+ debug "Extracting raw tarball" &&
3422+ { tar xvvf "${raw_disk}" || /bin/true; }
3423+
3424+[ ! -e success ] &&
3425+ fail "Tarball contents reported failure"
3426+
3427+cp "${work_d}/console.log" .
3428+
3429+# Wait for Cloud-Init to finish any work
3430+debug "Cleaning up..."
3431+cleanup
3432+exit 0
3433
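launch_kvm.sh declares success only if, after the VM exits, the tarball it extracts from --raw-disk contains a file named "success" (the check just before the console.log copy). A sketch of the guest-side half of that contract, assuming the raw disk appears as /dev/vdb given the drive ordering above; the device name and artifact list are illustrative, and the real user-data templates in this branch define the details:

    # run inside the builder VM at the end of its user-data script
    cd /tmp/build-output
    touch success                            # marker that launch_kvm.sh looks for
    tar -cvf /dev/vdb success *.img *.log    # stream the results onto the raw disk
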
3434=== added file 'maas_config.sh'
3435--- maas_config.sh 1970-01-01 00:00:00 +0000
3436+++ maas_config.sh 2018-05-31 04:33:07 +0000
3437@@ -0,0 +1,75 @@
3438+#!/bin/bash
3439+short_opts="h"
3440+long_opts="distro:,stream:,maas-branch:,out:,template:,serial:,local:,base-name:,out_d:"
3441+getopt_out=$(getopt --name "${0##*/}" \
3442+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
3443+ eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
3444+
3445+usage() {
3446+ cat <<EOM
3447+${0##*/} - Populates values in the build template.
3448+
3449+ Required:
3450+ --distro Distro code name, i.e. precise
3451+ --template Template file
3452+ --stream Stream, i.e. daily, release
3453+ --base-name The name of the file to work on
3454+ --serial The build serial
3455+ --out The output file
3456+ --out_d Where to stuff the output files
3457+
3458+ Optional:
3459+ --maas-branch bzr branch for maas image code
3460+EOM
3461+}
3462+
3463+
3464+fail() { echo "${@}" 1>&2; exit 1;}
3465+
3466+serial="${serial:-$(date +%Y%m%d)}"
3467+maas_branch="${maas_branch:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
3468+template_f="${PWD}/img-maas.tmpl"
3469+
3470+while [ $# -ne 0 ]; do
3471+ cur=${1}; next=${2};
3472+ case "$cur" in
3473+ --distro) distro=$2; shift;;
3474+ --stream) stream=$2; shift;;
3475+ --local) local_d=$2; shift;;
3476+ --maas-branch) maas_branch=$2; shift;;
3477+ --base-name) base_name=$2; shift;;
3478+ --template) template_f=$2; shift;;
3479+ --out) out_f=$2; shift;;
3480+ --out_d) out_d=$2; shift;;
3481+ --) shift; break;;
3482+ esac
3483+ shift;
3484+done
3485+
3486+fail_usage() { fail "Must define $@"; }
3487+
3488+[ -z "${distro}" ] && fail_usage "--distro"
3489+[ -z "${stream}" ] && fail_usage "--stream"
3490+[ -z "${local_d}" ] && fail_usage "--local"
3491+[ -z "${out_f}" ] && fail_usage "--out"
3492+[ -z "${out_d}" ] && fail_usage "--out_d"
3493+[ -z "${base_name}" ] && fail_usage "--base-name"
3494+
3495+case "$distro" in
3496+ trusty) arches="${ARCH_TYPE:-i386 amd64 armhf}";
3497+ [[ "$(uname -m)" =~ ppc64 ]] && arches="ppc64el";;
3498+ *) arches="${ARCH_TYPE:-i386 amd64 armhf}";;
3499+esac
3500+
3501+sed -e "s,%d,${distro},g" \
3502+ -e "s,%S,${stream},g" \
3503+ -e "s,%M,${maas_branch},g" \
3504+ -e "s,%D,${local_d},g" \
3505+ -e "s,%B,${base_name},g" \
3506+ -e "s,%s,${serial},g" \
3507+ -e "s,%O,${out_d},g" \
3508+ -e "s,%A,${arches},g" \
3509+ ${template_f} > ${out_f} ||
3510+ fail "Unable to write template file"
3511+
3512+exit 0
3513
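The sed pipeline fills single-character placeholders in the template: %d distro, %S stream, %M maas branch, %D local build dir, %B base name, %s serial, %O output dir, %A arch list. A toy example against a hypothetical two-line template (not one of the real templates/*.tmpl files); the serial falls back to today's date:

    echo 'build %d for arches: %A' > /tmp/demo.tmpl
    echo 'publish %B (%S) to %O from %D' >> /tmp/demo.tmpl
    ./maas_config.sh --distro trusty --stream daily --local /srv/in \
        --base-name trusty-server-cloudimg --template /tmp/demo.tmpl \
        --out /tmp/demo.out --out_d /srv/out
    # /tmp/demo.out now reads:
    #   build trusty for arches: i386 amd64 armhf
    #   publish trusty-server-cloudimg (daily) to /srv/out from /srv/in
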
3514=== added file 'make-seed.sh'
3515--- make-seed.sh 1970-01-01 00:00:00 +0000
3516+++ make-seed.sh 2018-05-31 04:33:07 +0000
3517@@ -0,0 +1,147 @@
3518+#!/bin/bash
3519+
3520+VERBOSITY=0
3521+TEMP_D=""
3522+DEF_DISK_FORMAT="raw"
3523+DEF_FILESYSTEM="iso9660"
3524+
3525+error() { echo "$@" 1>&2; }
3526+errorp() { printf "$@" 1>&2; }
3527+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
3528+failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
3529+
3530+Usage() {
3531+ cat <<EOF
3532+Usage: ${0##*/} [ options ] output user-data [meta-data]
3533+
3534+ Create a disk for cloud-init to utilize nocloud
3535+
3536+ options:
3537+ -h | --help show usage
3538+ -d | --disk-format D disk format to output. default: raw
3539+ -f | --filesystem F filesystem format (vfat or iso), default: iso9660
3540+
3541+ -i | --interfaces F write network interfaces file into metadata
3542+ -m | --dsmode M add 'dsmode' ('local' or 'net') to the metadata
3543+ default in cloud-init is 'net', meaning network is
3544+ required.
3545+
3546+ Example:
3547+ * cat my-user-data
3548+ #cloud-config
3549+ password: passw0rd
3550+ chpasswd: { expire: False }
3551+ ssh_pwauth: True
3552+ * echo "instance-id: \$(uuidgen || echo i-abcdefg)" > my-meta-data
3553+ * ${0##*/} my-seed.img my-user-data my-meta-data
3554+EOF
3555+}
3556+
3557+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
3558+cleanup() {
3559+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
3560+}
3561+
3562+debug() {
3563+ local level=${1}; shift;
3564+ [ "${level}" -gt "${VERBOSITY}" ] && return
3565+ error "${@}"
3566+}
3567+
3568+short_opts="hi:d:f:m:o:v"
3569+long_opts="disk-format:,dsmode:,filesystem:,help,interfaces:,output:,verbose"
3570+getopt_out=$(getopt --name "${0##*/}" \
3571+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
3572+ eval set -- "${getopt_out}" ||
3573+ bad_Usage
3574+
3575+## <<insert default variables here>>
3576+output=""
3577+userdata=""
3578+metadata=""
3579+filesystem=$DEF_FILESYSTEM
3580+diskformat=$DEF_DISK_FORMAT
3581+interfaces=_unset
3582+dsmode=""
3583+
3584+
3585+while [ $# -ne 0 ]; do
3586+ cur=${1}; next=${2};
3587+ case "$cur" in
3588+ -h|--help) Usage ; exit 0;;
3589+ -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
3590+ -d|--disk-format) diskformat=$next; shift;;
3591+ -f|--filesystem) filesystem=$next; shift;;
3592+ -m|--dsmode) dsmode=$next; shift;;
3593+ -i|--interfaces) interfaces=$next; shift;;
3594+ --) shift; break;;
3595+ esac
3596+ shift;
3597+done
3598+
3599+## check arguments here
3600+## how many args do you expect?
3601+[ $# -ge 2 ] || bad_Usage "must provide output, userdata"
3602+[ $# -le 3 ] || bad_Usage "confused by additional args"
3603+
3604+output=$1
3605+userdata=$2
3606+metadata=$3
3607+
3608+[ -n "$metadata" -a "${interfaces}" != "_unset" ] &&
3609+ fail "metadata and --interfaces are incompatible"
3610+[ -n "$metadata" -a -n "$dsmode" ] &&
3611+ fail "metadata and dsmode are incompatible"
3612+[ "$interfaces" = "_unset" -o -r "$interfaces" ] ||
3613+ fail "$interfaces: not a readable file"
3614+
3615+TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
3616+ fail "failed to make tempdir"
3617+trap cleanup EXIT
3618+
3619+if [ -n "$metadata" ]; then
3620+ cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy"
3621+else
3622+ {
3623+ echo "instance-id: iid-local01"
3624+ [ -n "$dsmode" ] && echo "dsmode: $dsmode"
3625+ [ -n "$interfaces" ] && echo "interfaces: |" &&
3626+ sed 's,^, ,' "$interfaces"
3627+ } > "$TEMP_D/meta-data"
3628+fi
3629+
3630+if [ "$userdata" = "-" ]; then
3631+ cat > "$TEMP_D/user-data" || fail "failed to read from stdin"
3632+else
3633+ cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy"
3634+fi
3635+
3636+## alternatively, create a vfat filesystem with same files
3637+img="$TEMP_D/seed.img"
3638+truncate --size 100K "$img" || fail "failed truncate image"
3639+
3640+case "$filesystem" in
3641+ iso9660|iso)
3642+ genisoimage -output "$img" -volid cidata \
3643+ -joliet -rock "$TEMP_D/user-data" "$TEMP_D/meta-data" \
3644+ > "$TEMP_D/err" 2>&1 ||
3645+ { cat "$TEMP_D/err" 1>&2; fail "failed to genisoimage"; }
3646+ ;;
3647+ vfat)
3648+ mkfs.vfat -n cidata "$img" || fail "failed mkfs.vfat"
3649+ mcopy -oi "$img" "$TEMP_D/user-data" "$TEMP_D/meta-data" :: ||
3650+ fail "failed to copy user-data, meta-data to img"
3651+ ;;
3652+ *) fail "unknown filesystem $filesystem";;
3653+esac
3654+
3655+[ "$output" = "-" ] && output="$TEMP_D/final"
3656+qemu-img convert -f raw -O "$diskformat" "$img" "$output" ||
3657+ fail "failed to convert to disk format $diskformat"
3658+
3659+[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } ||
3660+ fail "failed to write to -"
3661+
3662+error "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat"
3663+# vi: ts=4 noexpandtab
3664+
3665
3666=== added file 'overlay.sh'
3667--- overlay.sh 1970-01-01 00:00:00 +0000
3668+++ overlay.sh 2018-05-31 04:33:07 +0000
3669@@ -0,0 +1,23 @@
3670+#!/bin/bash
3671+# Overlays a new branch over this branch. This allows you to reuse code
3672+# from this branch against a development branch or a private branch.
3673+
3674+my_script_path=$(readlink -f ${0})
3675+my_s_dir=$(dirname ${my_script_path})
3676+
3677+source "${my_s_dir}/functions/bzr_check.sh"
3678+source "${my_s_dir}/functions/common"
3679+
3680+rsync_merge() {
3681+ debug "Merging ${1} with private"
3682+ [ -d ${1} ] || fail "no such directory ${1} for merging!"
3683+ rsync -av ${1}/* ${my_s_dir} ||
3684+ fail "failed to merge ${1}"
3685+}
3686+
3687+for i in ${@}
3688+do
3689+ check_tmp=$(mktemp --directory --tmpdir=${my_s_dir} bzrbranch.XXX)
3690+ check_branch ${i} ${check_tmp}
3691+ rsync_merge ${check_tmp}
3692+done
3693
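Typical use is to lay a private or development branch over this checkout: check_branch (from functions/bzr_check.sh) fetches each argument into a temporary directory and rsync_merge copies it on top. Example with a hypothetical branch URL:

    ./overlay.sh lp:~someone/vmbuilder/private-overrides
    # multiple branches may be given; they are applied in order, later ones winning
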
3694=== added directory 'pylib'
3695=== added directory 'pylib/changelogger'
3696=== added file 'pylib/changelogger.py'
3697--- pylib/changelogger.py 1970-01-01 00:00:00 +0000
3698+++ pylib/changelogger.py 2018-05-31 04:33:07 +0000
3699@@ -0,0 +1,222 @@
3700+from __future__ import print_function
3701+
3702+import logging
3703+import re
3704+import requests
3705+import subprocess
3706+from debian.changelog import (Changelog)
3707+
3708+# http://changelogs.ubuntu.com/changelogs/pool/main/l/linux-meta/linux-meta_4.2.0.17.19/changelog
3709+changelog_url_base = \
3710+ "http://changelogs.ubuntu.com/changelogs/pool/{}/{}/{}/{}_{}/changelog"
3711+
3712+
3713+class ChangeDelta(object):
3714+
3715+ def __init__(self, pkg, changelog=None):
3716+ self._pkg = pkg
3717+ self._changelogs = changelog
3718+
3719+ def _set_pkg(self, pkg):
3720+ if pkg is not None:
3721+ self._pkg = str(pkg)
3722+
3723+ def _get_pkg(self):
3724+ return self._pkg
3725+
3726+ pkg = property(_get_pkg, _set_pkg)
3727+
3728+ def _get_changelogs(self):
3729+ try:
3730+ return self._changelogs
3731+ except AttributeError:
3732+ return []
3733+
3734+ def _set_changelogs(self, changelogs):
3735+ self._changelogs = changelogs
3736+
3737+ changelogs = property(_get_changelogs, _set_changelogs)
3738+
3739+ def iter_changelogs(self, text=False):
3740+ for block in self.changelogs:
3741+ if text:
3742+ yield str(block)
3743+ else:
3744+ yield block
3745+
3746+ def _get_bug_cves(self):
3747+ """extract information from the changelog block"""
3748+ mappings, bugs, cves = ({}, [], [])
3749+ cve_re = re.compile("CVE-\d+-\d+")
3750+ bug_re = re.compile("\(LP:.#(\d+)\)")
3751+
3752+ for block in self.iter_changelogs():
3753+ _block = str(block)
3754+ cves_in_block = cve_re.findall(_block)
3755+ cves.extend(cves_in_block)
3756+
3757+ bugs_in_block = bug_re.findall(_block)
3758+ bugs.extend(bugs_in_block)
3759+
3760+ ver = str(block.version)
3761+ mappings[ver] = {'cves': cves_in_block,
3762+ 'bugs': bugs_in_block}
3763+
3764+ self.mappings = mappings
3765+ self.cves = cves
3766+ self.bugs = bugs
3767+
3768+ def _get_cves(self):
3769+ self._get_bug_cves()
3770+ return self._cves
3771+
3772+ def _set_cves(self, cves):
3773+ self._cves = cves
3774+
3775+ cves = property(_get_cves, _set_cves)
3776+
3777+ def cve_in_delta(self):
3778+ if len(self.cves) > 0:
3779+ return True
3780+ return False
3781+
3782+ def _get_bugs(self):
3783+ self._get_bug_cves()
3784+ return self._bugs
3785+
3786+ def _set_bugs(self, bugs):
3787+ self._bugs = bugs
3788+
3789+ bugs = property(_get_bugs, _set_bugs)
3790+
3791+ def _get_min_version(self):
3792+ if self.changelogs:
3793+ return self.changelogs[-1].version
3794+
3795+ min_version = property(_get_min_version)
3796+
3797+ def _get_max_version(self):
3798+ if self.changelogs:
3799+ return self.changelogs[0].version
3800+
3801+ max_version = property(_get_max_version)
3802+
3803+ def _get_mappings(self):
3804+ try:
3805+ return self._mappings
3806+ except AttributeError:
3807+ return {}
3808+
3809+ def _set_mappings(self, mapping):
3810+ self._mappings = mapping
3811+
3812+ mappings = property(_get_mappings, _set_mappings)
3813+
3814+ def format_changelogs(self):
3815+ changeblocks = "\n".join(self.iter_changelogs(text=True))
3816+ return changeblocks
3817+
3818+ def __str__(self):
3819+ return self.format_changelogs()
3820+
3821+
3822+class ReadChangeLog(Changelog):
3823+
3824+ def __init__(self, pkg, version):
3825+ self.logger = logging.getLogger("__changelog_{}__".format(pkg))
3826+ logging.basicConfig(format=
3827+ '%(asctime)s %(levelname)s - [PARSING {}] %(message)s'.format(
3828+ pkg))
3829+ self.logger.setLevel(logging.DEBUG)
3830+ self.logger.debug("Parsing changelog for {}".format(version))
3831+
3832+ ch_url = self.get_changelog_url(pkg, version)
3833+ self.logger.debug("URL: {}".format(ch_url))
3834+ try:
3835+ raw_changelog = self.get_changelog_from_url(ch_url)
3836+ Changelog.__init__(self, raw_changelog)
3837+ except Exception as e:
3838+ self.logger.debug("Failed to parse changelog!\n{}".format(e))
3839+
3840+ self.min_version = self._blocks[-1].version
3841+ self.max_version = self.version
3842+
3843+ self.logger.debug("Opened changelog:")
3844+ self.logger.debug(" Versions {} through {}".format(self.min_version,
3845+ self.max_version))
3846+
3847+ def get_changelog_url(self, pkg, version, pocket='main', url=None):
3848+ """Return the changelog URL"""
3849+
3850+ url = url or changelog_url_base
3851+ pdir = pkg[0]
3852+ if pkg.startswith("lib"):
3853+ pdir = pkg[:4]
3854+ return url.format(pocket, pdir, pkg, pkg, version)
3855+
3856+ def get_changelog_from_url(self, url):
3857+ """Fetch the change log"""
3858+ try:
3859+ chlog = requests.get(url)
3860+ if chlog.status_code == requests.codes.ok:
3861+ return chlog.text
3862+ else:
3863+ chlog.raise_for_status()
3864+
3865+ except requests.exceptions.HTTPError as e:
3866+ self.logger.critical("Failed to fetch changelog at {}:\n{}".format(
3867+ url, e))
3868+
3869+ def compare_versions(self, v1, operator, v2):
3870+ """Dirty, slow hack to compare versions"""
3871+ cmd = ['/usr/bin/dpkg', '--compare-versions', str(v1), str(operator),
3872+ str(v2)]
3873+ try:
3874+ subprocess.check_call(cmd)
3875+ except subprocess.CalledProcessError as e:
3876+ return False
3877+
3878+ return True
3879+
3880+ def iter_changeblocks(self):
3881+ """Iterate over the change logs"""
3882+ for block in self._blocks:
3883+ yield block
3884+
3885+ def get_changes_between(self, minv=None, maxv=None, commits=None):
3886+ """Get the changes between two versions"""
3887+ blocks = []
3888+ # Don't waste CPU time if we are getting the whole log
3889+ if minv is None and maxv is None:
3890+ for block in self.iter_changeblocks():
3891+ blocks.append(block)
3892+
3893+ # Now deal with changes between
3894+ minver = minv or self.min_version
3895+ maxver = maxv or self.max_version
3896+
3897+ # Allow for comparing the latest version against arbitrary counts
3898+ # i.e. you don't have to know the prior version
3899+ if isinstance(minver, int) and minver <= -1:
3900+ minver = self.versions[(abs(minver) - 1)]
3901+
3902+ if minv or maxv:
3903+ for block in self.iter_changeblocks():
3904+ bver = block.version
3905+ if minv:
3906+ if not self.compare_versions(bver, 'ge', minver):
3907+ continue
3908+ if not maxv:
3909+ blocks.append(block)
3910+ elif self.compare_versions(bver, 'le', maxver):
3911+ blocks.append(block)
3912+ elif maxv:
3913+ if not self.compare_versions(bver, 'le', maxver):
3914+ continue
3915+ if not minv:
3916+ blocks.append(block)
3917+ elif self.compare_versions(bver, 'ge', minver):
3918+ blocks.append(block)
3919+
3920+ ret = ChangeDelta(self.package, blocks)
3921+ return ret
3922
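A minimal usage sketch for ReadChangeLog, assuming pylib/ is on sys.path, python-debian is installed, changelogs.ubuntu.com is reachable, and the host has dpkg for version comparison; the package and versions are illustrative only:

    from changelogger import ReadChangeLog

    # parse the published changelog for a (hypothetical) package version
    log = ReadChangeLog('linux-meta', '4.2.0.17.19')
    delta = log.get_changes_between(minv='4.2.0.16.18')
    print(delta.bugs)             # LP bug numbers referenced in the delta
    print(delta.cve_in_delta())   # True if any CVE ids appear
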
3923=== added file 'pylib/changelogger/ChangeLogger.py'
3924--- pylib/changelogger/ChangeLogger.py 1970-01-01 00:00:00 +0000
3925+++ pylib/changelogger/ChangeLogger.py 2018-05-31 04:33:07 +0000
3926@@ -0,0 +1,222 @@
3927+from __future__ import print_function
3928+
3929+import logging
3930+import re
3931+import requests
3932+import subprocess
3933+from debian.changelog import (Changelog)
3934+
3935+# http://changelogs.ubuntu.com/changelogs/pool/main/l/linux-meta/linux-meta_4.2.0.17.19/changelog
3936+changelog_url_base = \
3937+ "http://changelogs.ubuntu.com/changelogs/pool/{}/{}/{}/{}_{}/changelog"
3938+
3939+
3940+class ChangeDelta(object):
3941+
3942+ def __init__(self, pkg, changelog=None):
3943+ self._pkg = pkg
3944+ self._changelogs = changelog
3945+
3946+ def _set_pkg(self, pkg):
3947+ if pkg is not None:
3948+ self._pkg = str(pkg)
3949+
3950+ def _get_pkg(self):
3951+ return self._pkg
3952+
3953+ pkg = property(_get_pkg, _set_pkg)
3954+
3955+ def _get_changelogs(self):
3956+ try:
3957+ return self._changelogs
3958+ except AttributeError:
3959+ return []
3960+
3961+ def _set_changelogs(self, changelogs):
3962+ self._changelogs = changelogs
3963+
3964+ changelogs = property(_get_changelogs, _set_changelogs)
3965+
3966+ def iter_changelogs(self, text=False):
3967+ for block in self.changelogs:
3968+ if text:
3969+ yield str(block)
3970+ else:
3971+ yield block
3972+
3973+ def _get_bug_cves(self):
3974+ """extract information from the changelog block"""
3975+ mappings, bugs, cves = ({}, [], [])
3976+ cve_re = re.compile("CVE-\d+-\d+")
3977+ bug_re = re.compile("\(LP:.#(\d+)\)")
3978+
3979+ for block in self.iter_changelogs():
3980+ _block = str(block)
3981+ cves_in_block = cve_re.findall(_block)
3982+ cves.extend(cves_in_block)
3983+
3984+ bugs_in_block = bug_re.findall(_block)
3985+ bugs.extend(bugs_in_block)
3986+
3987+ ver = str(block.version)
3988+ mappings[ver] = {'cves': cves_in_block,
3989+ 'bugs': bugs_in_block}
3990+
3991+ self.mappings = mappings
3992+ self.cves = cves
3993+ self.bugs = bugs
3994+
3995+ def _get_cves(self):
3996+ self._get_bug_cves()
3997+ return self._cves
3998+
3999+ def _set_cves(self, cves):
4000+ self._cves = cves
4001+
4002+ cves = property(_get_cves, _set_cves)
4003+
4004+ def cve_in_delta(self):
4005+ if len(self.cves) > 0:
4006+ return True
4007+ return False
4008+
4009+ def _get_bugs(self):
4010+ self._get_bug_cves()
4011+ return self._bugs
4012+
4013+ def _set_bugs(self, bugs):
4014+ self._bugs = bugs
4015+
4016+ bugs = property(_get_bugs, _set_bugs)
4017+
4018+ def _get_min_version(self):
4019+ if self.changelogs:
4020+ return self.changelogs[-1].version
4021+
4022+ min_version = property(_get_min_version)
4023+
4024+ def _get_max_version(self):
4025+ if self.changelogs:
4026+ return self.changelogs[0].version
4027+
4028+ max_version = property(_get_max_version)
4029+
4030+ def _get_mappings(self):
4031+ try:
4032+ return self._mappings
4033+ except AttributeError:
4034+ return {}
4035+
4036+ def _set_mappings(self, mapping):
4037+ self._mappings = mapping
4038+
4039+ mappings = property(_get_mappings, _set_mappings)
4040+
4041+ def format_changelogs(self):
4042+ changeblocks = "\n".join(self.iter_changelogs(text=True))
4043+ return changeblocks
4044+
4045+ def __str__(self):
4046+ return self.format_changelogs()
4047+
4048+
4049+class ReadChangelog(Changelog):
4050+
4051+ def __init__(self, pkg, version):
4052+ self.logger = logging.getLogger("__changelog_{}__".format(pkg))
4053+ logging.basicConfig(format=
4054+ '%(asctime)s %(levelname)s - [PARSING {}] %(message)s'.format(
4055+ pkg))
4056+ self.logger.setLevel(logging.DEBUG)
4057+ self.logger.debug("Parsing changelog for {}".format(version))
4058+
4059+ ch_url = self.get_changelog_url(pkg, version)
4060+ self.logger.debug("URL: {}".format(ch_url))
4061+ try:
4062+ raw_changelog = self.get_changelog_from_url(ch_url)
4063+ Changelog.__init__(self, raw_changelog)
4064+ except Exception as e:
4065+ self.logger.debug("Failed to parse changelog!\n{}".format(e))
4066+
4067+ self.min_version = self._blocks[-1].version
4068+ self.max_version = self.version
4069+
4070+ self.logger.debug("Opened changelog:")
4071+ self.logger.debug(" Versions {} through {}".format(self.min_version,
4072+ self.max_version))
4073+
4074+ def get_changelog_url(self, pkg, version, pocket='main', url=None):
4075+ """Return the changelog URL"""
4076+
4077+ url = url or changelog_url_base
4078+ pdir = pkg[0]
4079+ if pkg.startswith("lib"):
4080+ pdir = pkg[:4]
4081+ return url.format(pocket, pdir, pkg, pkg, version)
4082+
4083+ def get_changelog_from_url(self, url):
4084+ """Fetch the change log"""
4085+ try:
4086+ chlog = requests.get(url)
4087+ if chlog.status_code == requests.codes.ok:
4088+ return chlog.text
4089+ else:
4090+ chlog.raise_for_status()
4091+
4092+ except requests.exceptions.HTTPError as e:
4093+ self.logger.critical("Failed to fetch changelog at {}:\n{}".format(
4094+ url, e))
4095+
4096+ def compare_versions(self, v1, operator, v2):
4097+ """Dirty, slow hack to compare versions"""
4098+ cmd = ['/usr/bin/dpkg', '--compare-versions', str(v1), str(operator),
4099+ str(v2)]
4100+ try:
4101+ subprocess.check_call(cmd)
4102+ except subprocess.CalledProcessError as e:
4103+ return False
4104+
4105+ return True
4106+
4107+ def iter_changeblocks(self):
4108+ """Iterate over the change logs"""
4109+ for block in self._blocks:
4110+ yield block
4111+
4112+ def get_changes_between(self, minv=None, maxv=None, commits=None):
4113+ """Get the changes between two versions"""
4114+ blocks = []
4115+ # Don't waste CPU time if we are getting the whole log
4116+ if minv is None and maxv is None:
4117+ for block in self.iter_changeblocks():
4118+ blocks.append(block)
4119+
4120+ # Now deal with changes between
4121+ minver = minv or self.min_version
4122+ maxver = maxv or self.max_version
4123+
4124+ # Allow for comparing the latest version against arbitrary counts
4125+ # i.e. you don't have to know the prior version
4126+ if isinstance(minver, int) and minver <= -1:
4127+ minver = self.versions[(abs(minver) - 1)]
4128+
4129+ if minv or maxv:
4130+ for block in self.iter_changeblocks():
4131+ bver = block.version
4132+ if minv:
4133+ if not self.compare_versions(bver, 'ge', minver):
4134+ continue
4135+ if not maxv:
4136+ blocks.append(block)
4137+ elif self.compare_versions(bver, 'le', maxver):
4138+ blocks.append(block)
4139+ elif maxv:
4140+ if not self.compare_versions(bver, 'le', maxver):
4141+ continue
4142+ if not minv:
4143+ blocks.append(block)
4144+ elif self.compare_versions(bver, 'ge', minver):
4145+ blocks.append(block)
4146+
4147+ ret = ChangeDelta(self.package, blocks)
4148+ return ret
4149
4150=== added file 'pylib/changelogger/__init__.py'
4151=== added directory 'pylib/requests'
4152=== added file 'pylib/requests/__init__.py'
4153--- pylib/requests/__init__.py 1970-01-01 00:00:00 +0000
4154+++ pylib/requests/__init__.py 2018-05-31 04:33:07 +0000
4155@@ -0,0 +1,77 @@
4156+# -*- coding: utf-8 -*-
4157+
4158+# __
4159+# /__) _ _ _ _ _/ _
4160+# / ( (- (/ (/ (- _) / _)
4161+# /
4162+
4163+"""
4164+requests HTTP library
4165+~~~~~~~~~~~~~~~~~~~~~
4166+
4167+Requests is an HTTP library, written in Python, for human beings. Basic GET
4168+usage:
4169+
4170+ >>> import requests
4171+ >>> r = requests.get('http://python.org')
4172+ >>> r.status_code
4173+ 200
4174+ >>> 'Python is a programming language' in r.content
4175+ True
4176+
4177+... or POST:
4178+
4179+ >>> payload = dict(key1='value1', key2='value2')
4180+ >>> r = requests.post("http://httpbin.org/post", data=payload)
4181+ >>> print(r.text)
4182+ {
4183+ ...
4184+ "form": {
4185+ "key2": "value2",
4186+ "key1": "value1"
4187+ },
4188+ ...
4189+ }
4190+
4191+The other HTTP methods are supported - see `requests.api`. Full documentation
4192+is at <http://python-requests.org>.
4193+
4194+:copyright: (c) 2014 by Kenneth Reitz.
4195+:license: Apache 2.0, see LICENSE for more details.
4196+
4197+"""
4198+
4199+__title__ = 'requests'
4200+__version__ = '2.3.0'
4201+__build__ = 0x020300
4202+__author__ = 'Kenneth Reitz'
4203+__license__ = 'Apache 2.0'
4204+__copyright__ = 'Copyright 2014 Kenneth Reitz'
4205+
4206+# Attempt to enable urllib3's SNI support, if possible
4207+try:
4208+ from .packages.urllib3.contrib import pyopenssl
4209+ pyopenssl.inject_into_urllib3()
4210+except ImportError:
4211+ pass
4212+
4213+from . import utils
4214+from .models import Request, Response, PreparedRequest
4215+from .api import request, get, head, post, patch, put, delete, options
4216+from .sessions import session, Session
4217+from .status_codes import codes
4218+from .exceptions import (
4219+ RequestException, Timeout, URLRequired,
4220+ TooManyRedirects, HTTPError, ConnectionError
4221+)
4222+
4223+# Set default logging handler to avoid "No handler found" warnings.
4224+import logging
4225+try: # Python 2.7+
4226+ from logging import NullHandler
4227+except ImportError:
4228+ class NullHandler(logging.Handler):
4229+ def emit(self, record):
4230+ pass
4231+
4232+logging.getLogger(__name__).addHandler(NullHandler())
4233
4234=== added file 'pylib/requests/adapters.py'
4235--- pylib/requests/adapters.py 1970-01-01 00:00:00 +0000
4236+++ pylib/requests/adapters.py 2018-05-31 04:33:07 +0000
4237@@ -0,0 +1,388 @@
4238+# -*- coding: utf-8 -*-
4239+
4240+"""
4241+requests.adapters
4242+~~~~~~~~~~~~~~~~~
4243+
4244+This module contains the transport adapters that Requests uses to define
4245+and maintain connections.
4246+"""
4247+
4248+import socket
4249+
4250+from .models import Response
4251+from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
4252+from .packages.urllib3.response import HTTPResponse
4253+from .packages.urllib3.util import Timeout as TimeoutSauce
4254+from .compat import urlparse, basestring, urldefrag, unquote
4255+from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
4256+ prepend_scheme_if_needed, get_auth_from_url)
4257+from .structures import CaseInsensitiveDict
4258+from .packages.urllib3.exceptions import MaxRetryError
4259+from .packages.urllib3.exceptions import TimeoutError
4260+from .packages.urllib3.exceptions import SSLError as _SSLError
4261+from .packages.urllib3.exceptions import HTTPError as _HTTPError
4262+from .packages.urllib3.exceptions import ProxyError as _ProxyError
4263+from .cookies import extract_cookies_to_jar
4264+from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
4265+from .auth import _basic_auth_str
4266+
4267+DEFAULT_POOLBLOCK = False
4268+DEFAULT_POOLSIZE = 10
4269+DEFAULT_RETRIES = 0
4270+
4271+
4272+class BaseAdapter(object):
4273+ """The Base Transport Adapter"""
4274+
4275+ def __init__(self):
4276+ super(BaseAdapter, self).__init__()
4277+
4278+ def send(self):
4279+ raise NotImplementedError
4280+
4281+ def close(self):
4282+ raise NotImplementedError
4283+
4284+
4285+class HTTPAdapter(BaseAdapter):
4286+ """The built-in HTTP Adapter for urllib3.
4287+
4288+ Provides a general-case interface for Requests sessions to contact HTTP and
4289+ HTTPS urls by implementing the Transport Adapter interface. This class will
4290+ usually be created by the :class:`Session <Session>` class under the
4291+ covers.
4292+
4293+ :param pool_connections: The number of urllib3 connection pools to cache.
4294+ :param pool_maxsize: The maximum number of connections to save in the pool.
4295+ :param int max_retries: The maximum number of retries each connection
4296+ should attempt. Note, this applies only to failed connections and
4297+ timeouts, never to requests where the server returns a response.
4298+ :param pool_block: Whether the connection pool should block for connections.
4299+
4300+ Usage::
4301+
4302+ >>> import requests
4303+ >>> s = requests.Session()
4304+ >>> a = requests.adapters.HTTPAdapter(max_retries=3)
4305+ >>> s.mount('http://', a)
4306+ """
4307+ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
4308+ '_pool_block']
4309+
4310+ def __init__(self, pool_connections=DEFAULT_POOLSIZE,
4311+ pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
4312+ pool_block=DEFAULT_POOLBLOCK):
4313+ self.max_retries = max_retries
4314+ self.config = {}
4315+ self.proxy_manager = {}
4316+
4317+ super(HTTPAdapter, self).__init__()
4318+
4319+ self._pool_connections = pool_connections
4320+ self._pool_maxsize = pool_maxsize
4321+ self._pool_block = pool_block
4322+
4323+ self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
4324+
4325+ def __getstate__(self):
4326+ return dict((attr, getattr(self, attr, None)) for attr in
4327+ self.__attrs__)
4328+
4329+ def __setstate__(self, state):
4330+ # Can't handle by adding 'proxy_manager' to self.__attrs__ because
4331+ # self.poolmanager uses a lambda function, which isn't pickleable.
4332+ self.proxy_manager = {}
4333+ self.config = {}
4334+
4335+ for attr, value in state.items():
4336+ setattr(self, attr, value)
4337+
4338+ self.init_poolmanager(self._pool_connections, self._pool_maxsize,
4339+ block=self._pool_block)
4340+
4341+ def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
4342+ """Initializes a urllib3 PoolManager. This method should not be called
4343+ from user code, and is only exposed for use when subclassing the
4344+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4345+
4346+ :param connections: The number of urllib3 connection pools to cache.
4347+ :param maxsize: The maximum number of connections to save in the pool.
4348+ :param block: Block when no free connections are available.
4349+ """
4350+ # save these values for pickling
4351+ self._pool_connections = connections
4352+ self._pool_maxsize = maxsize
4353+ self._pool_block = block
4354+
4355+ self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
4356+ block=block)
4357+
4358+ def cert_verify(self, conn, url, verify, cert):
4359+ """Verify a SSL certificate. This method should not be called from user
4360+ code, and is only exposed for use when subclassing the
4361+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4362+
4363+ :param conn: The urllib3 connection object associated with the cert.
4364+ :param url: The requested URL.
4365+ :param verify: Whether we should actually verify the certificate.
4366+ :param cert: The SSL certificate to verify.
4367+ """
4368+ if url.lower().startswith('https') and verify:
4369+
4370+ cert_loc = None
4371+
4372+ # Allow self-specified cert location.
4373+ if verify is not True:
4374+ cert_loc = verify
4375+
4376+ if not cert_loc:
4377+ cert_loc = DEFAULT_CA_BUNDLE_PATH
4378+
4379+ if not cert_loc:
4380+ raise Exception("Could not find a suitable SSL CA certificate bundle.")
4381+
4382+ conn.cert_reqs = 'CERT_REQUIRED'
4383+ conn.ca_certs = cert_loc
4384+ else:
4385+ conn.cert_reqs = 'CERT_NONE'
4386+ conn.ca_certs = None
4387+
4388+ if cert:
4389+ if not isinstance(cert, basestring):
4390+ conn.cert_file = cert[0]
4391+ conn.key_file = cert[1]
4392+ else:
4393+ conn.cert_file = cert
4394+
4395+ def build_response(self, req, resp):
4396+ """Builds a :class:`Response <requests.Response>` object from a urllib3
4397+ response. This should not be called from user code, and is only exposed
4398+ for use when subclassing the
4399+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
4400+
4401+ :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
4402+ :param resp: The urllib3 response object.
4403+ """
4404+ response = Response()
4405+
4406+ # Fallback to None if there's no status_code, for whatever reason.
4407+ response.status_code = getattr(resp, 'status', None)
4408+
4409+ # Make headers case-insensitive.
4410+ response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
4411+
4412+ # Set encoding.
4413+ response.encoding = get_encoding_from_headers(response.headers)
4414+ response.raw = resp
4415+ response.reason = response.raw.reason
4416+
4417+ if isinstance(req.url, bytes):
4418+ response.url = req.url.decode('utf-8')
4419+ else:
4420+ response.url = req.url
4421+
4422+ # Add new cookies from the server.
4423+ extract_cookies_to_jar(response.cookies, req, resp)
4424+
4425+ # Give the Response some context.
4426+ response.request = req
4427+ response.connection = self
4428+
4429+ return response
4430+
4431+ def get_connection(self, url, proxies=None):
4432+ """Returns a urllib3 connection for the given URL. This should not be
4433+ called from user code, and is only exposed for use when subclassing the
4434+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4435+
4436+ :param url: The URL to connect to.
4437+ :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
4438+ """
4439+ proxies = proxies or {}
4440+ proxy = proxies.get(urlparse(url.lower()).scheme)
4441+
4442+ if proxy:
4443+ proxy = prepend_scheme_if_needed(proxy, 'http')
4444+ proxy_headers = self.proxy_headers(proxy)
4445+
4446+ if not proxy in self.proxy_manager:
4447+ self.proxy_manager[proxy] = proxy_from_url(
4448+ proxy,
4449+ proxy_headers=proxy_headers,
4450+ num_pools=self._pool_connections,
4451+ maxsize=self._pool_maxsize,
4452+ block=self._pool_block)
4453+
4454+ conn = self.proxy_manager[proxy].connection_from_url(url)
4455+ else:
4456+ # Only scheme should be lower case
4457+ parsed = urlparse(url)
4458+ url = parsed.geturl()
4459+ conn = self.poolmanager.connection_from_url(url)
4460+
4461+ return conn
4462+
4463+ def close(self):
4464+ """Disposes of any internal state.
4465+
4466+ Currently, this just closes the PoolManager, which closes pooled
4467+ connections.
4468+ """
4469+ self.poolmanager.clear()
4470+
4471+ def request_url(self, request, proxies):
4472+ """Obtain the url to use when making the final request.
4473+
4474+ If the message is being sent through a HTTP proxy, the full URL has to
4475+ be used. Otherwise, we should only use the path portion of the URL.
4476+
4477+ This should not be called from user code, and is only exposed for use
4478+ when subclassing the
4479+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4480+
4481+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
4482+ :param proxies: A dictionary of schemes to proxy URLs.
4483+ """
4484+ proxies = proxies or {}
4485+ scheme = urlparse(request.url).scheme
4486+ proxy = proxies.get(scheme)
4487+
4488+ if proxy and scheme != 'https':
4489+ url, _ = urldefrag(request.url)
4490+ else:
4491+ url = request.path_url
4492+
4493+ return url
4494+
4495+ def add_headers(self, request, **kwargs):
4496+ """Add any headers needed by the connection. As of v2.0 this does
4497+ nothing by default, but is left for overriding by users that subclass
4498+ the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4499+
4500+ This should not be called from user code, and is only exposed for use
4501+ when subclassing the
4502+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4503+
4504+ :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
4505+ :param kwargs: The keyword arguments from the call to send().
4506+ """
4507+ pass
4508+
4509+ def proxy_headers(self, proxy):
4510+ """Returns a dictionary of the headers to add to any request sent
4511+ through a proxy. This works with urllib3 magic to ensure that they are
4512+ correctly sent to the proxy, rather than in a tunnelled request if
4513+ CONNECT is being used.
4514+
4515+ This should not be called from user code, and is only exposed for use
4516+ when subclassing the
4517+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
4518+
4519+ :param proxies: The url of the proxy being used for this request.
4520+ :param kwargs: Optional additional keyword arguments.
4521+ """
4522+ headers = {}
4523+ username, password = get_auth_from_url(proxy)
4524+
4525+ if username and password:
4526+ headers['Proxy-Authorization'] = _basic_auth_str(username,
4527+ password)
4528+
4529+ return headers
4530+
4531+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
4532+ """Sends PreparedRequest object. Returns Response object.
4533+
4534+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
4535+ :param stream: (optional) Whether to stream the request content.
4536+ :param timeout: (optional) The timeout on the request.
4537+ :param verify: (optional) Whether to verify SSL certificates.
4538+ :param cert: (optional) Any user-provided SSL certificate to be trusted.
4539+ :param proxies: (optional) The proxies dictionary to apply to the request.
4540+ """
4541+
4542+ conn = self.get_connection(request.url, proxies)
4543+
4544+ self.cert_verify(conn, request.url, verify, cert)
4545+ url = self.request_url(request, proxies)
4546+ self.add_headers(request)
4547+
4548+ chunked = not (request.body is None or 'Content-Length' in request.headers)
4549+
4550+ timeout = TimeoutSauce(connect=timeout, read=timeout)
4551+
4552+ try:
4553+ if not chunked:
4554+ resp = conn.urlopen(
4555+ method=request.method,
4556+ url=url,
4557+ body=request.body,
4558+ headers=request.headers,
4559+ redirect=False,
4560+ assert_same_host=False,
4561+ preload_content=False,
4562+ decode_content=False,
4563+ retries=self.max_retries,
4564+ timeout=timeout
4565+ )
4566+
4567+ # Send the request.
4568+ else:
4569+ if hasattr(conn, 'proxy_pool'):
4570+ conn = conn.proxy_pool
4571+
4572+ low_conn = conn._get_conn(timeout=timeout)
4573+
4574+ try:
4575+ low_conn.putrequest(request.method,
4576+ url,
4577+ skip_accept_encoding=True)
4578+
4579+ for header, value in request.headers.items():
4580+ low_conn.putheader(header, value)
4581+
4582+ low_conn.endheaders()
4583+
4584+ for i in request.body:
4585+ low_conn.send(hex(len(i))[2:].encode('utf-8'))
4586+ low_conn.send(b'\r\n')
4587+ low_conn.send(i)
4588+ low_conn.send(b'\r\n')
4589+ low_conn.send(b'0\r\n\r\n')
4590+
4591+ r = low_conn.getresponse()
4592+ resp = HTTPResponse.from_httplib(
4593+ r,
4594+ pool=conn,
4595+ connection=low_conn,
4596+ preload_content=False,
4597+ decode_content=False
4598+ )
4599+ except:
4600+ # If we hit any problems here, clean up the connection.
4601+ # Then, reraise so that we can handle the actual exception.
4602+ low_conn.close()
4603+ raise
4604+ else:
4605+ # All is well, return the connection to the pool.
4606+ conn._put_conn(low_conn)
4607+
4608+ except socket.error as sockerr:
4609+ raise ConnectionError(sockerr, request=request)
4610+
4611+ except MaxRetryError as e:
4612+ raise ConnectionError(e, request=request)
4613+
4614+ except _ProxyError as e:
4615+ raise ProxyError(e)
4616+
4617+ except (_SSLError, _HTTPError) as e:
4618+ if isinstance(e, _SSLError):
4619+ raise SSLError(e, request=request)
4620+ elif isinstance(e, TimeoutError):
4621+ raise Timeout(e, request=request)
4622+ else:
4623+ raise
4624+
4625+ return self.build_response(request, resp)
4626
4627=== added file 'pylib/requests/api.py'
4628--- pylib/requests/api.py 1970-01-01 00:00:00 +0000
4629+++ pylib/requests/api.py 2018-05-31 04:33:07 +0000
4630@@ -0,0 +1,120 @@
4631+# -*- coding: utf-8 -*-
4632+
4633+"""
4634+requests.api
4635+~~~~~~~~~~~~
4636+
4637+This module implements the Requests API.
4638+
4639+:copyright: (c) 2012 by Kenneth Reitz.
4640+:license: Apache2, see LICENSE for more details.
4641+
4642+"""
4643+
4644+from . import sessions
4645+
4646+
4647+def request(method, url, **kwargs):
4648+ """Constructs and sends a :class:`Request <Request>`.
4649+ Returns :class:`Response <Response>` object.
4650+
4651+ :param method: method for the new :class:`Request` object.
4652+ :param url: URL for the new :class:`Request` object.
4653+ :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
4654+ :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
4655+ :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
4656+ :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
4657+ :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
4658+ :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
4659+ :param timeout: (optional) Float describing the timeout of the request in seconds.
4660+ :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
4661+ :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
4662+ :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
4663+ :param stream: (optional) if ``False``, the response content will be immediately downloaded.
4664+ :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
4665+
4666+ Usage::
4667+
4668+ >>> import requests
4669+ >>> req = requests.request('GET', 'http://httpbin.org/get')
4670+ <Response [200]>
4671+ """
4672+
4673+ session = sessions.Session()
4674+ return session.request(method=method, url=url, **kwargs)
4675+
4676+
4677+def get(url, **kwargs):
4678+ """Sends a GET request. Returns :class:`Response` object.
4679+
4680+ :param url: URL for the new :class:`Request` object.
4681+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4682+ """
4683+
4684+ kwargs.setdefault('allow_redirects', True)
4685+ return request('get', url, **kwargs)
4686+
4687+
4688+def options(url, **kwargs):
4689+ """Sends a OPTIONS request. Returns :class:`Response` object.
4690+
4691+ :param url: URL for the new :class:`Request` object.
4692+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4693+ """
4694+
4695+ kwargs.setdefault('allow_redirects', True)
4696+ return request('options', url, **kwargs)
4697+
4698+
4699+def head(url, **kwargs):
4700+ """Sends a HEAD request. Returns :class:`Response` object.
4701+
4702+ :param url: URL for the new :class:`Request` object.
4703+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4704+ """
4705+
4706+ kwargs.setdefault('allow_redirects', False)
4707+ return request('head', url, **kwargs)
4708+
4709+
4710+def post(url, data=None, **kwargs):
4711+ """Sends a POST request. Returns :class:`Response` object.
4712+
4713+ :param url: URL for the new :class:`Request` object.
4714+ :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
4715+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4716+ """
4717+
4718+ return request('post', url, data=data, **kwargs)
4719+
4720+
4721+def put(url, data=None, **kwargs):
4722+ """Sends a PUT request. Returns :class:`Response` object.
4723+
4724+ :param url: URL for the new :class:`Request` object.
4725+ :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
4726+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4727+ """
4728+
4729+ return request('put', url, data=data, **kwargs)
4730+
4731+
4732+def patch(url, data=None, **kwargs):
4733+ """Sends a PATCH request. Returns :class:`Response` object.
4734+
4735+ :param url: URL for the new :class:`Request` object.
4736+ :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
4737+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4738+ """
4739+
4740+ return request('patch', url, data=data, **kwargs)
4741+
4742+
4743+def delete(url, **kwargs):
4744+ """Sends a DELETE request. Returns :class:`Response` object.
4745+
4746+ :param url: URL for the new :class:`Request` object.
4747+ :param \*\*kwargs: Optional arguments that ``request`` takes.
4748+ """
4749+
4750+ return request('delete', url, **kwargs)
4751
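Note: the api.py helpers above are thin wrappers that create a throwaway Session per call, so no cookies or pooled connections persist between them. A minimal usage sketch (the httpbin.org URLs and payloads are illustrative only):

    import requests

    # GET with query-string parameters; allow_redirects defaults to True for get().
    resp = requests.get('http://httpbin.org/get', params={'q': 'vmbuilder'}, timeout=10)
    print(resp.status_code, resp.headers.get('content-type'))

    # POST with a form-encoded body; data may also be bytes or a file-like object.
    resp = requests.post('http://httpbin.org/post', data={'key': 'value'}, timeout=10)
    print(resp.status_code)
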
4752=== added file 'pylib/requests/auth.py'
4753--- pylib/requests/auth.py 1970-01-01 00:00:00 +0000
4754+++ pylib/requests/auth.py 2018-05-31 04:33:07 +0000
4755@@ -0,0 +1,193 @@
4756+# -*- coding: utf-8 -*-
4757+
4758+"""
4759+requests.auth
4760+~~~~~~~~~~~~~
4761+
4762+This module contains the authentication handlers for Requests.
4763+"""
4764+
4765+import os
4766+import re
4767+import time
4768+import hashlib
4769+
4770+from base64 import b64encode
4771+
4772+from .compat import urlparse, str
4773+from .cookies import extract_cookies_to_jar
4774+from .utils import parse_dict_header
4775+
4776+CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
4777+CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
4778+
4779+
4780+def _basic_auth_str(username, password):
4781+ """Returns a Basic Auth string."""
4782+
4783+ return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
4784+
4785+
4786+class AuthBase(object):
4787+ """Base class that all auth implementations derive from"""
4788+
4789+ def __call__(self, r):
4790+ raise NotImplementedError('Auth hooks must be callable.')
4791+
4792+
4793+class HTTPBasicAuth(AuthBase):
4794+ """Attaches HTTP Basic Authentication to the given Request object."""
4795+ def __init__(self, username, password):
4796+ self.username = username
4797+ self.password = password
4798+
4799+ def __call__(self, r):
4800+ r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
4801+ return r
4802+
4803+
4804+class HTTPProxyAuth(HTTPBasicAuth):
4805+ """Attaches HTTP Proxy Authentication to a given Request object."""
4806+ def __call__(self, r):
4807+ r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
4808+ return r
4809+
4810+
4811+class HTTPDigestAuth(AuthBase):
4812+ """Attaches HTTP Digest Authentication to the given Request object."""
4813+ def __init__(self, username, password):
4814+ self.username = username
4815+ self.password = password
4816+ self.last_nonce = ''
4817+ self.nonce_count = 0
4818+ self.chal = {}
4819+ self.pos = None
4820+
4821+ def build_digest_header(self, method, url):
4822+
4823+ realm = self.chal['realm']
4824+ nonce = self.chal['nonce']
4825+ qop = self.chal.get('qop')
4826+ algorithm = self.chal.get('algorithm')
4827+ opaque = self.chal.get('opaque')
4828+
4829+ if algorithm is None:
4830+ _algorithm = 'MD5'
4831+ else:
4832+ _algorithm = algorithm.upper()
4833+ # lambdas assume digest modules are imported at the top level
4834+ if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
4835+ def md5_utf8(x):
4836+ if isinstance(x, str):
4837+ x = x.encode('utf-8')
4838+ return hashlib.md5(x).hexdigest()
4839+ hash_utf8 = md5_utf8
4840+ elif _algorithm == 'SHA':
4841+ def sha_utf8(x):
4842+ if isinstance(x, str):
4843+ x = x.encode('utf-8')
4844+ return hashlib.sha1(x).hexdigest()
4845+ hash_utf8 = sha_utf8
4846+
4847+ KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
4848+
4849+ if hash_utf8 is None:
4850+ return None
4851+
4852+ # XXX not implemented yet
4853+ entdig = None
4854+ p_parsed = urlparse(url)
4855+ path = p_parsed.path
4856+ if p_parsed.query:
4857+ path += '?' + p_parsed.query
4858+
4859+ A1 = '%s:%s:%s' % (self.username, realm, self.password)
4860+ A2 = '%s:%s' % (method, path)
4861+
4862+ HA1 = hash_utf8(A1)
4863+ HA2 = hash_utf8(A2)
4864+
4865+ if nonce == self.last_nonce:
4866+ self.nonce_count += 1
4867+ else:
4868+ self.nonce_count = 1
4869+ ncvalue = '%08x' % self.nonce_count
4870+ s = str(self.nonce_count).encode('utf-8')
4871+ s += nonce.encode('utf-8')
4872+ s += time.ctime().encode('utf-8')
4873+ s += os.urandom(8)
4874+
4875+ cnonce = (hashlib.sha1(s).hexdigest()[:16])
4876+ noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
4877+ if _algorithm == 'MD5-SESS':
4878+ HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
4879+
4880+ if qop is None:
4881+ respdig = KD(HA1, "%s:%s" % (nonce, HA2))
4882+ elif qop == 'auth' or 'auth' in qop.split(','):
4883+ respdig = KD(HA1, noncebit)
4884+ else:
4885+ # XXX handle auth-int.
4886+ return None
4887+
4888+ self.last_nonce = nonce
4889+
4890+ # XXX should the partial digests be encoded too?
4891+ base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
4892+ 'response="%s"' % (self.username, realm, nonce, path, respdig)
4893+ if opaque:
4894+ base += ', opaque="%s"' % opaque
4895+ if algorithm:
4896+ base += ', algorithm="%s"' % algorithm
4897+ if entdig:
4898+ base += ', digest="%s"' % entdig
4899+ if qop:
4900+ base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
4901+
4902+ return 'Digest %s' % (base)
4903+
4904+ def handle_401(self, r, **kwargs):
4905+ """Takes the given response and tries digest-auth, if needed."""
4906+
4907+ if self.pos is not None:
4908+ # Rewind the file position indicator of the body to where
4909+ # it was to resend the request.
4910+ r.request.body.seek(self.pos)
4911+ num_401_calls = getattr(self, 'num_401_calls', 1)
4912+ s_auth = r.headers.get('www-authenticate', '')
4913+
4914+ if 'digest' in s_auth.lower() and num_401_calls < 2:
4915+
4916+ setattr(self, 'num_401_calls', num_401_calls + 1)
4917+ pat = re.compile(r'digest ', flags=re.IGNORECASE)
4918+ self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
4919+
4920+ # Consume content and release the original connection
4921+ # to allow our new request to reuse the same one.
4922+ r.content
4923+ r.raw.release_conn()
4924+ prep = r.request.copy()
4925+ extract_cookies_to_jar(prep._cookies, r.request, r.raw)
4926+ prep.prepare_cookies(prep._cookies)
4927+
4928+ prep.headers['Authorization'] = self.build_digest_header(
4929+ prep.method, prep.url)
4930+ _r = r.connection.send(prep, **kwargs)
4931+ _r.history.append(r)
4932+ _r.request = prep
4933+
4934+ return _r
4935+
4936+ setattr(self, 'num_401_calls', 1)
4937+ return r
4938+
4939+ def __call__(self, r):
4940+ # If we have a saved nonce, skip the 401
4941+ if self.last_nonce:
4942+ r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
4943+ try:
4944+ self.pos = r.body.tell()
4945+ except AttributeError:
4946+ pass
4947+ r.register_hook('response', self.handle_401)
4948+ return r
4949
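Note: auth.py supplies the callables that requests accepts via the auth= keyword: HTTPBasicAuth, HTTPProxyAuth, and HTTPDigestAuth (whose handle_401 hook replays the request once after a 401 challenge). A minimal sketch, with placeholder credentials and illustrative URLs:

    import requests
    from requests.auth import HTTPBasicAuth, HTTPDigestAuth

    # Basic auth; passing a plain (user, password) tuple is equivalent shorthand.
    resp = requests.get('http://httpbin.org/basic-auth/user/passwd',
                        auth=HTTPBasicAuth('user', 'passwd'), timeout=10)

    # Digest auth; build_digest_header() is used to answer the server's challenge.
    resp = requests.get('http://httpbin.org/digest-auth/auth/user/passwd',
                        auth=HTTPDigestAuth('user', 'passwd'), timeout=10)
    print(resp.status_code)
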
4950=== added file 'pylib/requests/cacert.pem'
4951--- pylib/requests/cacert.pem 1970-01-01 00:00:00 +0000
4952+++ pylib/requests/cacert.pem 2018-05-31 04:33:07 +0000
4953@@ -0,0 +1,5026 @@
4954+# This Source Code Form is subject to the terms of the Mozilla Public
4955+# License, v. 2.0. If a copy of the MPL was not distributed with this
4956+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
4957+
4958+# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
4959+# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
4960+# Label: "GTE CyberTrust Global Root"
4961+# Serial: 421
4962+# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db
4963+# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74
4964+# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36
4965+-----BEGIN CERTIFICATE-----
4966+MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD
4967+VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv
4968+bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv
4969+b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV
4970+UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
4971+cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
4972+b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH
4973+iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS
4974+r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4
4975+04Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r
4976+GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9
4977+3PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P
4978+lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/
4979+-----END CERTIFICATE-----
4980+
4981+# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
4982+# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
4983+# Label: "Thawte Server CA"
4984+# Serial: 1
4985+# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d
4986+# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c
4987+# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9
4988+-----BEGIN CERTIFICATE-----
4989+MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx
4990+FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
4991+VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
4992+biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm
4993+MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx
4994+MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT
4995+DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3
4996+dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl
4997+cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3
4998+DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD
4999+gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91
5000+yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX
The diff has been truncated for viewing.
