Merge lp:~chad.smith/vmbuilder/jenkins_kvm_azure_netplan_hotplug into lp:vmbuilder

Proposed by Chad Smith
Status: Superseded
Proposed branch: lp:~chad.smith/vmbuilder/jenkins_kvm_azure_netplan_hotplug
Merge into: lp:vmbuilder
Diff against target: 31474 lines (+30578/-0)
176 files modified
azure_config.sh (+95/-0)
base_indicies.sh (+28/-0)
build-juju-local.sh (+108/-0)
builder_config.sh (+78/-0)
checksum.sh (+27/-0)
config/cloud-azure.cfg (+9/-0)
config/cloud-maas.cfg (+11/-0)
config/cloud-maasv2.cfg (+10/-0)
config/cloud-maasv3.cfg (+10/-0)
config/cloud-precise.cfg (+24/-0)
config/cloud-trusty-pp64el.cfg (+13/-0)
config/cloud-trusty.cfg (+26/-0)
config/cloud-vps.cfg (+6/-0)
config/cloud.cfg (+17/-0)
copy_to_final.sh (+52/-0)
create-vhd.sh (+97/-0)
ec2_publisher.sh (+98/-0)
functions/bzr_check.sh (+14/-0)
functions/bzr_commit.sh (+23/-0)
functions/common (+37/-0)
functions/locker (+49/-0)
functions/merge_templates (+53/-0)
functions/mk_template.sh (+41/-0)
functions/retry (+16/-0)
generate-ubuntu-lists.sh (+44/-0)
get_serial.sh (+157/-0)
jenkins/CloudImages_Azure.sh (+162/-0)
jenkins/CloudImages_Base.sh (+96/-0)
jenkins/CloudImages_Base_Release_Delta.sh (+255/-0)
jenkins/CloudImages_Juju.sh (+253/-0)
jenkins/CloudImages_Update_Builder.sh (+68/-0)
jenkins/CloudImages_Vagrant.sh (+232/-0)
jenkins/MAAS_Builder.sh (+171/-0)
jenkins/MAAS_Promotion.sh (+31/-0)
jenkins/MAASv2_Builder.sh (+191/-0)
jenkins/MAASv2_Cleaner.sh (+55/-0)
jenkins/MAASv3_Builder.sh (+67/-0)
jenkins/Promote_Daily.sh (+55/-0)
jenkins/Promote_MAAS_Daily.sh (+48/-0)
jenkins/Publish_EC2.sh (+64/-0)
jenkins/Publish_Results_to_Tracker.sh (+34/-0)
jenkins/README.txt (+1/-0)
jenkins/Test_Azure.sh (+17/-0)
jenkins/build_lib.sh (+33/-0)
jenkins/env-test.sh (+2/-0)
launch_kvm.sh (+222/-0)
maas_config.sh (+75/-0)
make-seed.sh (+147/-0)
overlay.sh (+23/-0)
pylib/changelogger.py (+222/-0)
pylib/changelogger/ChangeLogger.py (+222/-0)
pylib/requests/__init__.py (+77/-0)
pylib/requests/adapters.py (+388/-0)
pylib/requests/api.py (+120/-0)
pylib/requests/auth.py (+193/-0)
pylib/requests/cacert.pem (+5026/-0)
pylib/requests/certs.py (+24/-0)
pylib/requests/compat.py (+115/-0)
pylib/requests/cookies.py (+454/-0)
pylib/requests/exceptions.py (+75/-0)
pylib/requests/hooks.py (+45/-0)
pylib/requests/models.py (+803/-0)
pylib/requests/packages/__init__.py (+3/-0)
pylib/requests/packages/chardet/__init__.py (+32/-0)
pylib/requests/packages/chardet/big5freq.py (+925/-0)
pylib/requests/packages/chardet/big5prober.py (+42/-0)
pylib/requests/packages/chardet/chardetect.py (+46/-0)
pylib/requests/packages/chardet/chardistribution.py (+231/-0)
pylib/requests/packages/chardet/charsetgroupprober.py (+106/-0)
pylib/requests/packages/chardet/charsetprober.py (+62/-0)
pylib/requests/packages/chardet/codingstatemachine.py (+61/-0)
pylib/requests/packages/chardet/compat.py (+34/-0)
pylib/requests/packages/chardet/constants.py (+39/-0)
pylib/requests/packages/chardet/cp949prober.py (+44/-0)
pylib/requests/packages/chardet/escprober.py (+86/-0)
pylib/requests/packages/chardet/escsm.py (+242/-0)
pylib/requests/packages/chardet/eucjpprober.py (+90/-0)
pylib/requests/packages/chardet/euckrfreq.py (+596/-0)
pylib/requests/packages/chardet/euckrprober.py (+42/-0)
pylib/requests/packages/chardet/euctwfreq.py (+428/-0)
pylib/requests/packages/chardet/euctwprober.py (+41/-0)
pylib/requests/packages/chardet/gb2312freq.py (+472/-0)
pylib/requests/packages/chardet/gb2312prober.py (+41/-0)
pylib/requests/packages/chardet/hebrewprober.py (+283/-0)
pylib/requests/packages/chardet/jisfreq.py (+569/-0)
pylib/requests/packages/chardet/jpcntx.py (+219/-0)
pylib/requests/packages/chardet/langbulgarianmodel.py (+229/-0)
pylib/requests/packages/chardet/langcyrillicmodel.py (+329/-0)
pylib/requests/packages/chardet/langgreekmodel.py (+225/-0)
pylib/requests/packages/chardet/langhebrewmodel.py (+201/-0)
pylib/requests/packages/chardet/langhungarianmodel.py (+225/-0)
pylib/requests/packages/chardet/langthaimodel.py (+200/-0)
pylib/requests/packages/chardet/latin1prober.py (+139/-0)
pylib/requests/packages/chardet/mbcharsetprober.py (+86/-0)
pylib/requests/packages/chardet/mbcsgroupprober.py (+54/-0)
pylib/requests/packages/chardet/mbcssm.py (+575/-0)
pylib/requests/packages/chardet/sbcharsetprober.py (+120/-0)
pylib/requests/packages/chardet/sbcsgroupprober.py (+69/-0)
pylib/requests/packages/chardet/sjisprober.py (+91/-0)
pylib/requests/packages/chardet/universaldetector.py (+170/-0)
pylib/requests/packages/chardet/utf8prober.py (+76/-0)
pylib/requests/packages/urllib3/__init__.py (+58/-0)
pylib/requests/packages/urllib3/_collections.py (+205/-0)
pylib/requests/packages/urllib3/connection.py (+204/-0)
pylib/requests/packages/urllib3/connectionpool.py (+710/-0)
pylib/requests/packages/urllib3/contrib/ntlmpool.py (+120/-0)
pylib/requests/packages/urllib3/contrib/pyopenssl.py (+422/-0)
pylib/requests/packages/urllib3/exceptions.py (+126/-0)
pylib/requests/packages/urllib3/fields.py (+177/-0)
pylib/requests/packages/urllib3/filepost.py (+100/-0)
pylib/requests/packages/urllib3/packages/__init__.py (+4/-0)
pylib/requests/packages/urllib3/packages/ordered_dict.py (+260/-0)
pylib/requests/packages/urllib3/packages/six.py (+385/-0)
pylib/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py (+13/-0)
pylib/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py (+105/-0)
pylib/requests/packages/urllib3/poolmanager.py (+258/-0)
pylib/requests/packages/urllib3/request.py (+141/-0)
pylib/requests/packages/urllib3/response.py (+308/-0)
pylib/requests/packages/urllib3/util/__init__.py (+27/-0)
pylib/requests/packages/urllib3/util/connection.py (+45/-0)
pylib/requests/packages/urllib3/util/request.py (+68/-0)
pylib/requests/packages/urllib3/util/response.py (+13/-0)
pylib/requests/packages/urllib3/util/ssl_.py (+133/-0)
pylib/requests/packages/urllib3/util/timeout.py (+234/-0)
pylib/requests/packages/urllib3/util/url.py (+162/-0)
pylib/requests/sessions.py (+637/-0)
pylib/requests/status_codes.py (+88/-0)
pylib/requests/structures.py (+127/-0)
pylib/requests/utils.py (+673/-0)
register-vagrant-version.sh (+107/-0)
rss-cleanup.sh (+16/-0)
rss-generate.sh (+103/-0)
should_build.py (+484/-0)
standalone.sh (+303/-0)
templates/default.tmpl (+420/-0)
templates/example-addin.tmpl (+140/-0)
templates/handle-xdeb.py (+15/-0)
templates/img-azure-12.04-addin.tmpl (+47/-0)
templates/img-azure-14.04-addin.tmpl (+58/-0)
templates/img-azure-14.10-addin.tmpl (+60/-0)
templates/img-azure-15.04-addin.tmpl (+59/-0)
templates/img-azure-15.10-addin.tmpl (+50/-0)
templates/img-azure-15.10-docker.tmpl (+25/-0)
templates/img-azure-16.04-addin.tmpl (+58/-0)
templates/img-azure-16.04-docker.tmpl (+8/-0)
templates/img-azure-16.10-addin.tmpl (+58/-0)
templates/img-azure-16.10-docker.tmpl (+8/-0)
templates/img-azure-17.04-addin.tmpl (+58/-0)
templates/img-azure-17.10-addin.tmpl (+58/-0)
templates/img-azure-18.04-addin.tmpl (+58/-0)
templates/img-azure-extra.tmpl (+19/-0)
templates/img-azure.tmpl (+354/-0)
templates/img-build.tmpl (+135/-0)
templates/img-extra-nets.tmpl (+141/-0)
templates/img-juju-addin.tmpl (+250/-0)
templates/img-juju.tmpl (+455/-0)
templates/img-maas.tmpl (+96/-0)
templates/img-maasv2.tmpl (+137/-0)
templates/img-maasv3.tmpl (+85/-0)
templates/img-smartcloud.tmpl (+112/-0)
templates/img-update.tmpl (+292/-0)
templates/img-vagrant.tmpl (+294/-0)
templates/img-vps.tmpl (+67/-0)
tests/azure-node-settings-tool.py (+111/-0)
tests/azure.sh (+286/-0)
tests/decider.py (+285/-0)
tests/jenkins-ssh (+68/-0)
tests/passless-sudoifer (+57/-0)
tests/run-azure.sh (+29/-0)
tests/test-azure.py (+233/-0)
tests/tracker.py (+187/-0)
tracker.sh (+16/-0)
tweet.sh (+44/-0)
ubuntu-adj2version (+53/-0)
update_release_directory.sh (+17/-0)
wait_package.sh (+27/-0)
To merge this branch: bzr merge lp:~chad.smith/vmbuilder/jenkins_kvm_azure_netplan_hotplug
Reviewer Review Type Date Requested Status
VMBuilder Pending
Review via email: mp+347174@code.launchpad.net

This proposal has been superseded by a proposal from 2018-05-31.

Commit message

Update Azure's nic hotplug script to use netplan if available instead of ENI

Also avoid appending unnecessary include directives in
/etc/network/interfaces on netplan-enabled systems.

Description of the change

WIP: I think I targeted the wrong branch, will resubmit tomorrow
diff should be http://paste.ubuntu.com/p/Kx8YrXv7cp/

Azure images deliver a script, /usr/local/sbin/ephemeral_eth.sh, which is called from udev add rules on NIC hotplug events for NICs named eth[1-9]*. This script predates netplan and, as such, only manages /etc/network/interfaces.
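
For context, the udev wiring for this script is roughly of the following form (illustrative only; the exact rule file name and arguments shipped in the image may differ):

        # e.g. /etc/udev/rules.d/10-azure-hotplug.rules (hypothetical path)
        SUBSYSTEM=="net", ACTION=="add", KERNEL=="eth[1-9]*", RUN+="/usr/local/sbin/ephemeral_eth.sh"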

In Bionic and later, cloud-init writes a fallback interface config in /etc/netplan/50-cloud-init.yaml configuring DHCP on eth0 as a primary/mandatory NIC (optional: false), so boot will wait on that device to come up.
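
For reference, cloud-init's generated fallback on a Bionic Azure instance looks roughly like the following (illustrative only; the real file is generated per instance and pins the primary NIC's MAC address):

        network:
            version: 2
            ethernets:
                eth0:
                    dhcp4: true
                    match:
                        macaddress: 00:0d:3a:00:00:00
                    set-name: eth0

Because 'optional: true' is absent, systemd-networkd-wait-online treats eth0 as required at boot.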

This changeset adds a check in ephemeral_eth.sh for whether the netplan command exists:
  - If netplan is present, a separate /etc/netplan/90-hotplug-<nicname>.yaml is generated for each network device eth[1-9]* that is attached after cloud-init's initial network configuration.
  - After the netplan YAML is created, 'netplan apply' is called to bring up that device with DHCP. The netplan config marks these NICs as "optional: true" so that subsequent boots will not wait on them to come up in case they are later detached (sketched below).
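
A minimal sketch of that per-NIC path (variable and file names here are illustrative, not the exact contents of the diff):

        # inside ephemeral_eth.sh, with the hotplugged interface name in $nic
        if command -v netplan >/dev/null 2>&1; then
            conf="/etc/netplan/90-hotplug-${nic}.yaml"
            printf 'network:\n  version: 2\n  ethernets:\n    %s:\n      dhcp4: true\n      optional: true\n' \
                "$nic" > "$conf"
            netplan apply
        else
            # pre-netplan behaviour: manage the NIC via /etc/network/interfaces (ENI)
            :
        fi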

NICs are attached in Azure through its UI or API. Azure requires the instance to be stopped before a NIC is attached or detached, and started again after the operation.

  Potential gap:
     There is no attempt to clean up old netplan YAML files, or to designate a new primary/mandatory NIC, because the original hotplug script does not handle udev rules for hotplug removal of NICs (via Azure network interface detachment).
     This could present a minor issue if eth1 is attached (and optional by design) and eth0 gets detached. In that case, systemd may still wait for eth0 to come up because of the mandatory eth0 definition in /etc/netplan/50-cloud-init.yaml.

Revision history for this message
Daniel Axtens (daxtens) wrote :

A few things:

1) netplan is the default on Artful too. I think your detection code is right, but your commit message is potentially wrong?

2) If I understand cloud-init and netplan correctly, couldn't you achieve the same effect by just adding this as /etc/netplan/99-azure-hotplug.yaml? Then you could drop ephemeral_eth.sh entirely on Artful and Bionic.

network:
    version: 2
    ethernets:
        ephemeral:
            dhcp4: true
            match:
                driver: hv_netvsc
                name: "eth*"
            optional: true

3) Looking at the code itself, you should probably use /run/netplan for ephemeral files, rather than /etc/netplan. That also solves your cleanup problem.

4) And it's worth knowing that netplan apply will look for network devices that are 'down', unbind them from their drivers, and rebind them. With your approach, netplan apply will be run for each extra device, so if there are 4 extra devices, the first one configured won't be replugged, the second will be replugged once, the third will be replugged twice, and so on. This *probably* isn't problematic, but it makes me nervous, especially doing it in rapid succession.

800. By Chad Smith

Revert changes to ephemeral_eth.sh and emit a netplan 90-hotplug-azure.yaml

cloud-init only sets up a network configuration at initial boot, pinned to
the original MAC address. If we are building a netplan-enabled image,
emit a static netplan YAML which complements the original /etc/netplan/50-cloud-init.yaml fallback definition. If the original eth0 is no longer attached to the VM, cloud-init's netplan YAML will not match by MAC address and the system will fall through to match the following hotpluggedeth0 definition:

        hotpluggedeth0:
            dhcp4: true
            match:
                driver: hv_netvsc
                name: 'eth0'
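
For reference, as a complete file the emitted drop-in would read roughly as follows (a sketch of the approach described above; the template's actual output may differ in detail):

        network:
            version: 2
            ethernets:
                hotpluggedeth0:
                    dhcp4: true
                    match:
                        driver: hv_netvsc
                        name: 'eth0'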

801. By Chad Smith

Move /etc/network/interfaces include directive back out of config_udev.

The appended include directive in /etc/network/interfaces needs to exist for
both the upstart and udev solutions, so it can't live exclusively within the
config_udev_or_netplan function. It needs to be present in all non-netplan
environments (upstart and ENI), so test that we are not building a netplan-enabled
image before manipulating /etc/network/interfaces (see the sketch below).
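
A minimal sketch of that guard (the include line and paths are assumptions for illustration, not the exact lines in the template):

        # Only touch ENI on non-netplan images
        if ! command -v netplan >/dev/null 2>&1; then
            line='source /etc/network/interfaces.d/*.cfg'
            grep -qxF "$line" /etc/network/interfaces ||
                echo "$line" >> /etc/network/interfaces
        fi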

Unmerged revisions

801. By Chad Smith

Move /etc/network/interfaces include directive back out of config_udev.

The appended include directive in /etc/network/interfaces needs to exist for
both the upstart and udev solutions, so it can't live exclusively within the
config_udev_or_netplan function. It needs to be present in all non-netplan
environments (upstart and ENI), so test that we are not building a netplan-enabled
image before manipulating /etc/network/interfaces.

800. By Chad Smith

Revert changes to ephemeral_eth.sh and emit a netplan 90-hotplug-azure.yaml

cloud-init only sets up a network configuration at initial boot, pinned to
the original MAC address. If we are building a netplan-enabled image,
emit a static netplan YAML which complements the original /etc/netplan/50-cloud-init.yaml fallback definition. If the original eth0 is no longer attached to the VM, cloud-init's netplan YAML will not match by MAC address and the system will fall through to match the following hotpluggedeth0 definition:

        hotpluggedeth0:
            dhcp4: true
            match:
                driver: hv_netvsc
                name: 'eth0'

799. By Chad Smith

Update Azure's nic hotplug script to use netplan if available instead of ENI

Also avoid appending unnecessary include directives in
/etc/network/interfaces on netplan-enabled systems.

798. By Dan Watkins

Install Azure model assertion in Azure bionic images

797. By Dan Watkins

Install linux-azure in bionic Azure images [a=Odd_Bloke][r=fginther,tribaal]

MP: https://code.launchpad.net/~ubuntu-on-ec2/vmbuilder/jenkins_kvm-oddbloke/+merge/341846

796. By Philip Roche

Merge lp:~ubuntu-on-ec2/vmbuilder/jenkins_kvm-oddbloke into lp:~ubuntu-on-ec2/vmbuilder/jenkins_kvm [a=daniel-thewatkins] [r=fginther,philroche]

Use HTTPS for Vagrant box redirects (LP: #1754948)

MP: https://code.launchpad.net/~ubuntu-on-ec2/vmbuilder/jenkins_kvm-oddbloke/+merge/341339

795. By Dan Watkins

Drop unscd from bionic Azure images [a=Odd_Bloke][r=fginther,philroche]

MP: https://code.launchpad.net/~daniel-thewatkins/vmbuilder/jenkins_kvm-drop-unscd/+merge/337830

794. By Dan Watkins

do not explicitly install cloud-init [a=mwhudson][r=fginther,Odd_Bloke,philroche]

MP: https://code.launchpad.net/~mwhudson/vmbuilder/jenkins_kvm.mwhudson/+merge/334878

793. By Francis Ginther

Update source image and package set for artful. Dropping packages that no longer exist.
[a=fginther][r=daniel-thewatkins,philroche,tribaal]

MP: https://code.launchpad.net/~fginther/vmbuilder/new-artful-builder/+merge/332487

792. By Francis Ginther

Add a bb-series version of the Azure suite specific template files, img-azure-18.04-addin.tmpl.
[a=fginther][r=daniel-thewatkins,rcj]

MP: https://code.launchpad.net/~fginther/vmbuilder/jenkins_kvm-add-azure-18.04/+merge/332368

Preview Diff

=== added file 'azure_config.sh'
--- azure_config.sh 1970-01-01 00:00:00 +0000
+++ azure_config.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,95 @@
1#!/bin/bash
2
3# Load up some libraries
4my_dir="$( cd "$( dirname "$0" )" && pwd )"
5source "${my_dir}/functions/locker"
6source "${my_dir}/functions/common"
7source "${my_dir}/functions/retry"
8source "${my_dir}/functions/merge_templates"
9
10usage() {
11 cat <<EOM
12${0##/} - Populate values in build template.
13
14 Required:
15 --template Template file
16 --extra Extra, arbitrary addin
17 --serial The build serial
18 --out The output file
19 --tar Name of tar file
20 --tar-d Name of directory to tar up
21 --version The version number of the distro
22 --proposed Build against proposed
23 --docker Install Docker/Docker compose
24EOM
25}
26
27short_opts="h"
28long_opts="out:,template:,serial:,tar:,tar-d:,version:,proposed,docker,extra:"
29getopt_out=$(getopt --name "${0##*/}" \
30 --options "${short_opts}" --long "${long_opts}" -- "$@") &&
31 eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
32
33serial=${SERIAL:-$(date +%Y%m%d)}
34
35# Standard templates
36template_f="$(readlink -f ${0%/*}/templates/img-azure.tmpl)"
37template_netaddin_f="$(readlink -f ${0%/*}/templates/img-extra-nets.tmpl)"
38template_extra_f="$(readlink -f ${0%/*}/templates/img-azure-extra.tmpl)"
39extra_addins=()
40
41while [ $# -ne 0 ]; do
42 cur=${1}; next=${2};
43 case "$cur" in
44 --template) template_f=$2; shift;;
45 --extra) extra_addins+=($2); shift;;
46 --serial) serial=$2; shift;;
47 --tar) tar_f=$2; shift;;
48 --tar-d) tar_d=$2; shift;;
49 --out) out_f=$2; shift;;
50 --version) version=$2; shift;;
51 --proposed) proposed="true";;
52 --docker) docker="1";;
53 --) shift; break;;
54 esac
55 shift;
56done
57
58fail() { echo "${@}" 2>&1; exit 1;}
59fail_usage() { fail "Must define $@"; }
60
61# Create the template file for image conversion
62sed -e "s,%S,${serial},g" \
63 -e "s,%v,${version},g" \
64 -e "s,%P,${proposed:-false},g" \
65 ${template_f} > ${out_f}.base ||
66 fail "Unable to write template file"
67
68# Support per-suite addins
69net_addin=1
70
71# Disable the extra nets for Azure due to the systemd changes
72dist_ge ${version} vivid && net_addin=0
73
74# Order the addins
75default_addin="${template_f//.tmpl/}-${version}-addin.tmpl"
76docker_addin="${template_f//.tmpl/}-${version}-docker.tmpl"
77
78addins=(${default_addin})
79[ ${net_addin:-0} -eq 1 ] && addins+=("${template_netaddin_f}")
80[ ${docker:-0} -eq 1 -a -f "${docker_addin}" ] && addins+=("${docker_addin}")
81addins+=("${extra_addins[@]}" "${template_extra_f}")
82
83merge_templates ${out_f}.base ${out_f} ${addins[@]}
84
85debug "=================================================="
86debug "Content of template:"
87cat ${out_f}
88debug "=================================================="
89
90if [ -n "${tar_d}" ]; then
91 tar -C "${tar_d}" -cf "${tar_f}" . &&
92 debug "TAR'd up ${tar_d}" ||
93 fail "Failed to tar up ${tar_d}"
94fi
95exit 0
096
=== added file 'base_indicies.sh'
--- base_indicies.sh 1970-01-01 00:00:00 +0000
+++ base_indicies.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,28 @@
1#!/bin/bash -xe
2#
3# Simple job for creating indices
4suite="${1:-$SUITE}"
5serial="${2:-$SERIAL}"
6
7umask 022
8cronrun="/srv/builder/vmbuilder/bin/cronrun"
9
10# Override and set some home variables
11export HOME="/srv/builder/vmbuilder"
12export CDIMAGE_BIN="${CDIMAGE_BIN:-$HOME/cdimage/bin}"
13export CDIMAGE_ROOT="${CDIMAGE_ROOT:-$HOME/cdimage}"
14export PUBLISH_SCRIPTS="${PUBLISH_SCRIPTS:-$HOME/ec2-publishing-scripts}"
15export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
16
17fail() { echo "${@}" 2>&1; exit 1;}
18
19echo "Checksumming result directories"
20work_d="${WORKD:-/srv/ec2-images}/${suite}/${serial}"
21
22${CDIMAGE_BIN}/checksum-directory "${work_d}" &&
23 checksum-directory "${work_d}/unpacked" ||
24 fail "Failed to checksum result directories"
25
26${PUBLISH_SCRIPTS}/update-build-indexes daily ${work_d} ${suite} &&
27 update-build-indexes daily ${work_d} ${suite} ||
28 fail "Failed to make the indexes for ${work_d}"
029
=== added file 'build-juju-local.sh'
--- build-juju-local.sh 1970-01-01 00:00:00 +0000
+++ build-juju-local.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,108 @@
1#!/bin/bash
2
3# Read in the common files
4myname=$(readlink -f ${0})
5mydir=$(dirname ${myname})
6mypdir=$(dirname ${mydir})
7
8# Scope stuff locally here
9# Create a temporary directory for the fun
10tmp_dir=$(mktemp -d builder.XXXXX --tmpdir=${TMPDIR:-/tmp})
11export TMPDIR=${tmp_dir}
12export WORKSPACE=${mydir}
13export HOME=${mydir}
14export LOCAL_BUILD=1
15
16clean() { [ -d ${tmp_dir} ] && rm -rf ${tmp_dir};
17 [ -d "${mydir}/Virtualbox\ VMS" ] && rm -rf "${mydir}/Virtualbox\ VMS";
18 exit "${@}"; }
19error() { echo "$@"; }
20debug() { error "$(date -R):" "$@"; }
21fail() { debug "${1:-Something bad happened}"; clean 1; }
22
23# Fly with the safety on!
24trap fail EXIT
25trap fail SIGINT
26
27test_cmd_exists() {
28 which $1 >> /dev/null || fail "Command $1 does not exist! Please install $2"
29}
30
31if [ "$(lsb_release -r -s | sed 's/\.//')" -lt 1404 ]; then
32 fail "This must be run on Ubuntu 14.04 or higher"
33fi
34
35test_cmd_exists qemu-nbd qemu-utils
36test_cmd_exists vboxmanage virtualbox
37test_cmd_exists bzr bzr
38test_cmd_exists sstream-query simplestreams
39
40# This defines what gets built
41build_for=(${BUILD_FOR:-trusty:amd64 precise:amd64})
42[ -n "${JUJU_CORE_PKG}" -o -n "${JUJU_LOCAL_PKG}" ] && \
43 [ ${#build_for[@]} -ge 2 ] && \
44 fail "JUJU_CORE_PKG and JUJU_LOCAL_PKG can be specified only for a single build target."
45
46for build in ${build_for[@]};
47do
48 suite=${build%%:*}
49 arch=${build##*:}
50 builder_img="${mydir}/${suite}-builder-${arch}.img"
51 results_d_arch="${mydir}/${suite}-${arch}"
52 built_img="${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.img"
53
54 [ ! -e "${results_d_arch}" ] &&
55 mkdir -p "${results_d_arch}"
56
57 cmd=(
58 "${mydir}/standalone.sh"
59 "--cloud_cfg ${mydir}/config/cloud-vps.cfg"
60 "--template ${mydir}/templates/img-juju.tmpl"
61 "--suite ${suite}"
62 "--arch ${arch}"
63 "--use_img ${builder_img}"
64 "--final_img ${built_img}"
65 "--resize_final 40"
66 )
67
68 [ ! -e "${builder_img}" ] && cmd+=("--fetch_new")
69 if [ -n "${JUJU_CORE_PKG}" -o -n "${JUJU_LOCAL_PKG}" ]; then
70 cmd+=("--cloud-init-file ${mydir}/templates/handle-xdeb.py:text/part-handler")
71 if [ -n "${JUJU_CORE_PKG}" ]; then
72 cmd+=("--cloud-init-file ${JUJU_CORE_PKG}:application/x-deb")
73 echo "JUJU_CORE_PKG=$(basename $JUJU_CORE_PKG)" > ${tmp_dir}/juju-sources.sh
74 fi
75 if [ -n "${JUJU_LOCAL_PKG}" ]; then
76 cmd+=("--cloud-init-file ${JUJU_LOCAL_PKG}:application/x-deb")
77 echo "JUJU_LOCAL_PKG=$(basename $JUJU_LOCAL_PKG)" >> ${tmp_dir}/juju-sources.sh
78 fi
79 cmd+=("--cloud-init-file ${tmp_dir}/juju-sources.sh:application/x-shellscript")
80 fi
81
82 [ -e "${results_d_arch}/${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.img" ] ||
83 ( cd ${results_d_arch} && ${cmd[@]} )
84
85 # The following Vagrant-ifies the build
86 SUITE=${suite} \
87 ARCH_TYPE=${arch} \
88 SERIAL="current" \
89 SRV_D="${mydir}/${suite}-${arch}" \
90 OUTPUT_D="${mydir}/${suite}-${arch}" \
91 WORKSPACE="${mydir}/${suite}-${arch}" \
92 ${mydir}/jenkins/CloudImages_Juju.sh
93
94 expected_box="${results_d_arch}/${suite}-server-cloudimg-${arch}-juju-vagrant-disk1.box"
95 [ -f "${expected_box}" ] || fail "unable to find ${expected_box}; build failed!"
96 results_out+=("${build} ${expected_box}")
97done
98
99# Clear the traps
100trap - EXIT
101trap - SIGINT
102trap
103
104debug "Results are in following locations"
105echo -e "${results_out[@]}"
106
107debug "Done with the build!"
108clean 0
0109
=== added file 'builder_config.sh'
--- builder_config.sh 1970-01-01 00:00:00 +0000
+++ builder_config.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,78 @@
1#!/bin/bash
2short_opts="h"
3long_opts="distro:,arch:,build-type:,bzr-automated-builds:,bzr-pubscripts:,bzr-livebuild:,bzr-vmbuilder:,out:,template:,serial:,proposed"
4getopt_out=$(getopt --name "${0##*/}" \
5 --options "${short_opts}" --long "${long_opts}" -- "$@") &&
6 eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
7
8usage() {
9 cat <<EOM
10${0##/} - Populate values in build template.
11
12 Required:
13 --distro Distro code name, i.e. precise
14 --arch Arch, i.e. amd64, i386, armel, armhf
15 --template Template file
16 --serial The build serial
17 --out The output file
18 --proposed Build against -proposed
19
20 Optional:
21 --bzr-automated-builds bzr branch for automated ec2 builds
22 --bzr-pubscripts bzr branch of EC2 Publishing Scripts
23 --bzr-livebuild bzr branch of live-builder
24 --bzr-vmbuilder bzr branch of vmbuilder
25EOM
26}
27
28
29fail() { echo "${@}" 2>&1; exit 1;}
30
31serial=$(date +%Y%m%d)
32bzr_automated_builds="http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/automated-ec2-builds"
33bzr_pubscripts="http://bazaar.launchpad.net/~ubuntu-on-ec2/ubuntu-on-ec2/ec2-publishing-scripts"
34bzr_livebuild="http://bazaar.launchpad.net/~ubuntu-on-ec2/live-build/cloud-images"
35bzr_vmbuilder="http://bazaar.launchpad.net/~ubuntu-on-ec2/vmbuilder/0.11a"
36template_f="${PWD}/img-build.tmpl"
37proposed=0
38
39while [ $# -ne 0 ]; do
40 cur=${1}; next=${2};
41 case "$cur" in
42 --distro) distro=$2; shift;;
43 --arch) arch=$2; shift;;
44 --build-type) build_type=$2; shift;;
45 --bzr-automated-builds) bzr_automated_builds=$2; shift;;
46 --bzr-pubscripts) bzr_pubscripts=$2; shift;;
47 --bzr-livebuild) bzr_livebuild=$2; shift;;
48 --bzr-vmbuilder) bzr_vmbuilder=$2; shift;;
49 --template) template_f=$2; shift;;
50 --serial) serial=$2; shift;;
51 --out) out_f=$2; shift;;
52 --proposed) proposed=1;;
53 --) shift; break;;
54 esac
55 shift;
56done
57
58fail_usage() { fail "Must define $@"; }
59
60[ -z "${distro}" ] && fail_usage "--distro"
61[ -z "${arch}" ] && fail_usage "--arch"
62[ -z "${build_type}" ] && fail_usage "--build-type"
63[ -z "${out_f}" ] && fail_usage "--out"
64
65sed -e "s,%d,${distro},g" \
66 -e "s,%a,${arch},g" \
67 -e "s,%b,${build_type},g" \
68 -e "s,%A,${bzr_automated_builds},g" \
69 -e "s,%P,${bzr_pubscripts},g" \
70 -e "s,%L,${bzr_livebuild},g" \
71 -e "s,%V,${bzr_vmbuilder},g" \
72 -e "s,%S,${serial},g" \
73 -e "s,%p,${proposed:-0},g" \
74 -e "s,%C,$(awk 1 ORS='\\n' < "${HOME}/.lp_creds")," \
75 ${template_f} > ${out_f} ||
76 fail "Unable to write template file"
77
78exit 0
079
=== added file 'checksum.sh'
--- checksum.sh 1970-01-01 00:00:00 +0000
+++ checksum.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,27 @@
1# Override and set some home variables
2export HOME="${USE_HOME:-/srv/builder}"
3export CDIMAGE_BIN="${HOME}/cdimage/bin"
4export CDIMAGE_ROOT="${HOME}/cdimage"
5export PATH="${CDIMAGE_BIN}:${PATH}"
6
7SUITE_DIR="${BASE_D}/${SUITE}"
8[ -n "${STREAM}" -a "${STREAM}" != "daily" ] &&
9 SUITE_DIR="${BASE_D}/${STREAM}/${SUITE}"
10SERIAL_DIR="${SUITE_DIR}/${SERIAL}"
11
12echo "Checksumming the new version..."
13checksum-directory "${SERIAL_DIR}"
14
15if [ ! -d ${SERIAL_DIR}/unpacked ]; then
16 echo "Adding build info to the new version..."
17 mkdir -p ${SERIAL_DIR}/unpacked
18 cat << EOF > ${SERIAL_DIR}/unpacked/build-info.txt
19SERIAL=$SERIAL
20EOF
21fi
22
23checksum-directory ${SERIAL_DIR}/unpacked
24
25if [ "${UPDATE_CURRENT:-false}" = "true" ]; then
26 ./update_release_directory.sh "${SUITE_DIR}"
27fi
028
=== added directory 'config'
=== added file 'config/cloud-azure.cfg'
--- config/cloud-azure.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-azure.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,9 @@
1#cloud-config
2package_upgrade: true
3password: ubuntu
4chpasswd: { expire: False }
5ssh_pwauth: True
6packages:
7- pastebinit
8- zerofree
9- ubuntu-dev-tools
010
=== added file 'config/cloud-maas.cfg'
--- config/cloud-maas.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-maas.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,11 @@
1#cloud-config
2package_upgrade: true
3password: ubuntu
4packages:
5- bzr
6- kpartx
7- qemu-kvm
8- qemu-kvm-extras
9- qemu-kvm-extras-static
10- zerofree
11
012
=== added file 'config/cloud-maasv2.cfg'
--- config/cloud-maasv2.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-maasv2.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,10 @@
1#cloud-config
2#This is generic enough to build for both MAAS and general cloud images
3package_upgrade: true
4password: ubuntu
5packages:
6- bzr
7- qemu-utils
8- zerofree
9- gdisk
10- proot
011
=== added file 'config/cloud-maasv3.cfg'
--- config/cloud-maasv3.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-maasv3.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,10 @@
1#cloud-config
2#This is generic enough to build for both MAAS and general cloud images
3package_upgrade: true
4password: ubuntu
5packages:
6- bzr
7- qemu-utils
8- zerofree
9- gdisk
10- proot
011
=== added file 'config/cloud-precise.cfg'
--- config/cloud-precise.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-precise.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,24 @@
1#cloud-config
2package_upgrade: true
3password: ubuntu
4chpasswd: { expire: False }
5ssh_pwauth: True
6ssh_import_id:
7- daniel-thewatkins
8- philroche
9- rcj
10packages:
11- bzr
12- debootstrap
13- python-vm-builder
14- pastebinit
15- kpartx
16- qemu-kvm
17- qemu-kvm-extras
18- qemu-kvm-extras-static
19- debhelper
20- virtualbox
21- u-boot-tools
22- zerofree
23- gdisk
24- ubuntu-dev-tools
025
=== added file 'config/cloud-trusty-pp64el.cfg'
--- config/cloud-trusty-pp64el.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-trusty-pp64el.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,13 @@
1#cloud-config
2packages:
3- bzr
4- debootstrap
5- kpartx
6- debhelper
7- zerofree
8- gdisk
9- qemu-utils
10- ubuntu-dev-tools
11- gcc
12- make
13- zlib1g-dev
014
=== added file 'config/cloud-trusty.cfg'
--- config/cloud-trusty.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-trusty.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,26 @@
1#cloud-config
2#This is generic enough to build for both MAAS and general cloud images
3package_upgrade: true
4password: ubuntu
5chpasswd: { expire: False }
6ssh_pwauth: True
7ssh_import_id:
8- daniel-thewatkins
9- philroche
10- rcj
11apt_sources:
12- source: deb $MIRROR $RELEASE multiverse
13packages:
14- bzr
15- debootstrap
16- kpartx
17- qemu-kvm
18- qemu-user-static
19- debhelper
20- virtualbox
21- zerofree
22- gdisk
23- proot
24- u-boot-tools
25- ubuntu-dev-tools
26- zlib1g-dev
027
=== added file 'config/cloud-vps.cfg'
--- config/cloud-vps.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud-vps.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,6 @@
1#cloud-config
2packages:
3- pastebinit
4- zerofree
5- btrfs-tools
6- ubuntu-dev-tools
07
=== added file 'config/cloud.cfg'
--- config/cloud.cfg 1970-01-01 00:00:00 +0000
+++ config/cloud.cfg 2018-05-31 04:33:07 +0000
@@ -0,0 +1,17 @@
1#cloud-config
2# Generic cloud-config for builder instance
3package_upgrade: true
4password: ubuntu
5chpasswd: { expire: False }
6ssh_pwauth: True
7apt_sources:
8- source: deb $MIRROR $RELEASE multiverse
9packages:
10- bzr
11- zerofree
12- gdisk
13- gcc
14- make
15- git
16- ubuntu-dev-tools
17- zlib1g-dev
018
=== added file 'copy_to_final.sh'
--- copy_to_final.sh 1970-01-01 00:00:00 +0000
+++ copy_to_final.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,52 @@
1#!/bin/bash
2#
3# copies the files to their staging location
4
5DISTRO="${DISTRO:-$1}"
6WORKSPACE="${WORKSPACE:-$2}"
7SERIAL="${SERIAL:-$3}"
8BTYPE="${BTYPE:-$4}"
9BTYPE="${BTYPE:-server}"
10
11# Allow for legacy positional arguments
12test_build="${5:-0}"
13sandbox_build="${6:-0}"
14proposed_build="${7:-0}"
15
16# Allow for environment variable to control this
17TEST_BUILD="${TEST_BUILD:-$test_build}"
18SANDBOX_BUILD="${SANDBOX_BUILD:-$sandbox_build}"
19PROPOSED_BUILD="${PROPOSED_BUILD:-$proposed_build}"
20
21ROOT_D="${ROOT_D:-/srv/ec2-images}"
22base_d="${ROOT_D}/${DISTRO}/${SERIAL}"
23[ "${TEST_BUILD}" -eq 1 ] && base_d="${ROOT_D}/test_builds/${DISTRO}/${SERIAL}"
24[ "${SANDBOX_BUILD}" -eq 1 ] && base_d="${ROOT_D}/sandbox/${DISTRO}/${SERIAL}"
25[ "${PROPOSED_BUILD}" -eq 1 ] && base_d="${ROOT_D}/proposed/${DISTRO}/${SERIAL}"
26[ "${BTYPE}" = "desktop" ] && base_d="${ROOT_D}/desktop/${DISTRO}/${SERIAL}"
27
28# Make sure that the HWE directory is created
29if [[ "${BTYPE}" =~ server-hwe ]]; then
30 base_d="${base_d}/${BTYPE//server-/}"
31 [ ! -e "${base_d}" ] && mkdir -p "${base_d}"
32fi
33
34for roottar in $(find . -iname "*root.tar.gz"); do
35 echo "Generating file listing"
36
37 case ${roottar} in
38 *amd64*) arch_name="amd64";;
39 *i386*) arch_name="i386";;
40 *armel*) arch_name="armel";;
41 *armhf*) arch_name="armhf";;
42 *ppc64*) arch_name="ppc64el";;
43 *arm64*) arch_name="arm64";;
44 *) arch_name="unknown-$(date +%s)";;
45 esac
46
47 tar -tzvf ${roottar} >> "${WORKSPACE}/file-list-${arch_name}.log" ||
48 echo "Non fatal error. Failed to gather file list for ${roottar}"
49done
50
51cp -au ${DISTRO}-*/* ${base_d} || exit 1
52exit 0
053
=== added file 'create-vhd.sh'
--- create-vhd.sh 1970-01-01 00:00:00 +0000
+++ create-vhd.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,97 @@
1#!/bin/bash
2source "./functions/locker"
3
4usage() {
5cat << EOF
6This program is used to convert raw images to VHD files.
7
8 --suite: the Ubuntu Code name to build against
9 --source_file: the name of the raw image file to convert
10 --size: the size of the converted image in G (defaults to 30G)
11EOF
12exit 1
13}
14
15# Defaults
16vhd_size=30
17
18# Command line parsing
19short_opts="h"
20long_opts="suite:,source_file:,size:"
21getopt_out=$(getopt --name "${0##*/}" --options "${short_opts}"\
22 --long "${long_opts}" -- "$@")
23if [ $? -eq 0 ]; then
24 eval set -- "${getopt_out}"
25else
26 usage
27 exit 1
28fi
29
30while [ $# -ne 0 ]; do
31 cur=${1}; next=${2};
32
33 case "${cur}" in
34 --size) vhd_size="${2}"; shift;;
35 --source_file) source_file="${2}"; shift;;
36 --suite) suite="${2}"; shift;;
37 -h|--help) usage; exit 0;;
38 ?) usage; exit 1;;
39 --) shift; break;;
40 esac
41 shift;
42done
43
44if [ -z "$source_file" -o -z "$suite" ]; then
45 echo "--source_file and --suite required."
46 exit 1
47fi
48
49raw_name=$(readlink -f "$source_file")
50case ${suite} in
51 precise|trusty|wily|xenial)
52 vhd_name="${PWD}/${suite}-server-cloudimg-amd64-disk1.vhd"
53 ;;
54 *)
55 vhd_name="${PWD}/${suite}-server-cloudimg-amd64.vhd"
56 ;;
57esac
58
59# Copy the raw image to make it ready for VHD production
60cp --sparse=always "${raw_name}" "${raw_name}.pre-vhd" &&
61 debug "Copied raw image VHD production" ||
62 fail "Failed to copy raw image to ${raw_name}.pre-vhd"
63
64# Resize the copied RAW image
65debug "Truncating image to ${vhd_size}G"
66truncate -s "${vhd_size}G" "${raw_name}.pre-vhd" &&
67 debug "Truncated image at ${vhd_size}G" ||
68 fail "Failed to truncate disk image"
69
70# Convert to VHD first, step 1 of cheap hack
71# This is a cheap hack...half the time the next command
72# will fail with "VERR_INVALID_PARAMETER", so this is the,
73# er, workaround
74debug "Converting to VHD"
75_vbox_cmd convertfromraw --format VHD \
76 "${raw_name}.pre-vhd" \
77 "${vhd_name}.pre" &&
78 debug "Converted raw disk to VHD" ||
79 fail "Failed to convert raw image to VHD"
80
81# Clone the disk to fixed, VHD for Azure
82debug "Converting to VHD format from raw..."
83debug ".....this might take a while...."
84_vbox_cmd clonehd --format VHD --variant Fixed \
85 "${vhd_name}.pre" \
86 "${vhd_name}" &&
87 debug "Converted raw disk to VHD format using VirtualBox" ||
88 fail "Failed to convert raw image to VHD disk!"
89
90# Remove the unneeded files
91rm "${vhd_name}.pre" "${raw_name}.pre-vhd"
92
93debug "Image Characteristics:"
94_vbox_cmd showhdinfo "${vhd_name}"
95
96
97debug "Raw image converted to VHD"
098
=== added file 'ec2_publisher.sh'
--- ec2_publisher.sh 1970-01-01 00:00:00 +0000
+++ ec2_publisher.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,98 @@
1#!/bin/bash
2#
3# Simple execution wrapper for publishing images to EC2 from within Jenkins
4#
5suite="${1}"
6serial="${2}"
7btype="${3}"
8work_d="${4}"
9test_build="${5:-0}"
10sandbox_build="${6:-0}"
11allow_existing="${7:-0}"
12pub_type="daily"
13
14umask 022
15ec2_pub_scripts="${EC2_PUB_LOC:-${PWD}/ec2-publishing-scripts}"
16cronrun="/srv/builder/vmbuilder/bin/cronrun"
17
18# Override and set some home variables
19export HOME="/srv/builder/vmbuilder"
20export EC2_DAILY="${EC2_DAILY:-$HOME/ec2-daily}"
21export CDIMAGE_BIN="${CDIMAGE_BIN:-$HOME/cdimage/bin}"
22export CDIMAGE_ROOT="${CDIMAGE_ROOT:-$HOME/cdimage}"
23AUTO_BUILDS="${AUTO_BUILDS:-$EC2_DAILY/automated-ec2-builds}"
24PUBLISH_SCRIPTS="${PUBLISH_SCRIPTS:-$HOME/ec2-publishing-scripts}"
25XC2_PATH="${EC2_DAILY}/xc2"
26S3CMD_PATH="${S3CMD_PATH:-$EC2_DAILY/s3cmd}"
27MISC_PATH="${MISC_PATH:-$EC2_DAILY/misc}"
28VMBUILDER_PATH="${VMBUILDER_PATH:-$EC2_DAILY/vmbuilder}"
29( which euca-version >> /dev/null >&1 ) || EUCA2OOLS_PATH="${EC2_DAILY}/euca2ools"
30BOTO_PATH="${EC2_DAILY}/boto"
31
32export EC2_AMITOOL_HOME="${EC2_DAILY}/ec2-ami-tools"
33export LIVE_BUILD_PATH="${EC2_DAILY}/live-build"
34MYPATH=${VMBUILDER_PATH}:${XC2_PATH}:${S3CMD_PATH}:${PUBLISH_SCRIPTS}:${AUTO_BUILDS}:${VMBUILDER_PATH}:${EC2_AMITOOL_HOME}/bin:$HOME/bin:${CDIMAGE_BIN}
35
36[ -n "${EUCA2OOLS_PATH}" ] && MYPATH="${MYPATH}:${EUCA2OOLS_PATH}/bin"
37
38export PYTHONPATH="${BOTO_PATH}:${EUCA2OOLS_PATH}"
39export PATH=${MYPATH}:/usr/bin:/usr/sbin:/usr/bin:/sbin:/bin
40export JAVA_HOME=/usr
41export START_D=${EC2_DAILY}
42export PUBLISH_BASE=/srv/ec2-images
43export XC2_RETRY_ON="Server.InternalError Read.timeout Server.Unavailable Unable.to.connect"
44
45export PATH="/srv/builder/vmbuilder/cdimage/bin:${ec2_pub_scripts}:${PATH}"
46
47fail() { echo "${@}" 2>&1; exit 1;}
48
49[ -e "${ec2_pub_scripts}" ] ||
50 fail "Please make sure that ec2-publishing-scripts in the current path or define EC2_PUB_LOC"
51
52[ "$#" -eq 4 -o "$#" -eq 5 -o "$#" -eq 6 -o "$#" -eq 7 ] ||
53 fail "Incorrect number of parameters. Must invoke with: <suite> <serial> <build type> <directory>"
54
55[ "${test_build}" -eq 1 ] && {
56 echo "Build has been marked as a test build!";
57 echo "Publishing image to sandbox location";
58 pub_type="testing";
59}
60
61[ "${sandbox_build}" -eq 1 ] && {
62 echo "Build has been marked as a sandbox build!";
63 echo "Publishing image to Sandbox location";
64 pub_type="sandbox";
65}
66
67echo "Checksumming result directories"
68checksum-directory "${work_d}" &&
69 checksum-directory "${work_d}/unpacked" ||
70 fail "Failed to checksum result directories"
71
72# Drop ebs-standard and ebs-io1 from publication for xenial and after
73if [[ "${suite}" > "xenial" || "${suite}" == "xenial" ]] ; then
74 export OVERRIDE_ITEMS_EBS="i386:ebs-ssd amd64:ebs-ssd"
75 export OVERRIDE_ITEMS_HVM="amd64:hvm-ssd"
76fi
77
78echo "Publishing to EC2"
79pub_args=(--verbose)
80[ "${allow_existing}" -eq 1 ] && pub_args+=(--allow-existing)
81${cronrun} publish-build \
82 "${pub_args[@]}" \
83 "${suite}" \
84 "${btype}" \
85 "${pub_type}" \
86 "${work_d}" ||
87 fail "failed publish-build ${suite} ${btype} daily ${work_d}"
88
89# Update current
90base_d="${work_d%/*}"
91serial_d="${work_d##*/}"
92current_d="${base_d}/current"
93[ -e "${current_d}" ] && rm "${current_d}"
94( cd "${base_d}" && ln -s "${serial_d}" current ) ||
95 fail "failed to update current directory"
96
97exit 0
98
099
=== added directory 'functions'
=== added file 'functions/bzr_check.sh'
--- functions/bzr_check.sh 1970-01-01 00:00:00 +0000
+++ functions/bzr_check.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,14 @@
1#!/bin/bash
2
3error() { echo "$@" 1>&2; }
4fail() { error "$@"; exit 1; }
5debug() { error "$(date -R):" "$@"; }
6
7check_branch() {
8 [ -e "${2}" ] && rm -rf "${2}"
9 debug "Checking out ${1} to ${2}"
10 bzr checkout --lightweight "${1}" "${2}" &&
11 debug "Checked out ${1}" ||
12 fail "Failed to checkout ${1}"
13}
14
015
=== added file 'functions/bzr_commit.sh'
--- functions/bzr_commit.sh 1970-01-01 00:00:00 +0000
+++ functions/bzr_commit.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,23 @@
1#!/bin/bash
2info_dir=${1}
3oargs=${*//${1}/}
4TEMP_D=""
5error() { echo "$@" 1>&2; }
6fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
7
8echo "Commit comment is: ${oargs}"
9if [ ! -d "${info_dir}/.bzr" ]; then
10 ( cd "${info_dir}" && bzr init && bzr add --quiet . &&
11 bzr commit --quiet -m "initial state" ) >/dev/null &&
12 error "initialized bzr directory in ${info_dir}" ||
13 fail "failed to initialize bzr directory in ${info_dir}"
14fi
15
16bzr add "${info_dir}"
17if bzr diff "${info_dir}" >/dev/null; then
18 error "no changes were made to ${info_dir}"
19else
20 bzr commit -m "${oargs[*]}" "${info_dir}" ||
21 fail "failed to bzr commit in ${info_dir}"
22fi
23
024
=== added file 'functions/common'
--- functions/common 1970-01-01 00:00:00 +0000
+++ functions/common 2018-05-31 04:33:07 +0000
@@ -0,0 +1,37 @@
1# Common functions
2# vi: syntax=sh expandtab ts=4
3
4error() { echo "$@" 1>&2; }
5fail() { error "$@"; exit 1; }
6debug() { echo "$(date -R): $@" 1>&2; }
7run() { echo "$(date -R): running cmd: ${@}";
8 env ${@} && debug "Command successful: ${@}" ||
9 fail "failed to run cmd: ${@}"; }
10
11dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
12dist_le() { [[ "$1" < "$2" || "$1" == "$2" ]]; }
13
14map_version_to_suite() {
15 version=(${1//-LTS/ LTS})
16 awk '-F[, ]' \
17 '$2 ~ /LTS/ && $1 ==V {print $5}; $2 !~ /LTS/ && $1 == V {print $4}' \
18 V="${version[0]}" /usr/share/distro-info/ubuntu.csv
19}
20
21map_suite_to_version() {
22 suite=${1}
23 awk '-F[, ]' \
24 '$2 ~ /LTS/ && $5 == S {print $1"-"$2}; $2 !~ /LTS/ && $4 == S {print $1}' \
25 S="${suite}" /usr/share/distro-info/ubuntu.csv
26}
27
28# Look for common names
29[ -z "${kvm}" -a -n "${kvm_builder}" ] && kvm="${kvm_builder}"
30[ -z "${kvm_builder}" -a -n "${kvm}" ] && kvm_builder="${kvm}"
31
32[ -n "${kvm}" ] && scripts="${kvm}"
33[ -n "${kvm_builder}" ] && scripts="${kvm_builder}"
34
35export kvm="${scripts}"
36export kvm_builder="${scripts}"
37export scripts
038
=== added file 'functions/locker'
--- functions/locker 1970-01-01 00:00:00 +0000
+++ functions/locker 2018-05-31 04:33:07 +0000
@@ -0,0 +1,49 @@
1# This prevents concurrent commands from running.
2_script=$(readlink -f "${BASH_SOURCE[0]:?}")
3_my_dir=$(dirname "$_script")
4source "${_my_dir}/common"
5source "${_my_dir}/retry"
6
7cmd_lock() {
8 LOCKFILE="/tmp/wrapper-`basename $1`"
9 LOCKFD=99
10
11 _lock() { flock -$1 $LOCKFD; }
12 _no_more_locking() { _lock u; _lock xn && rm -f $LOCKFILE; }
13 _prepare_locking() { eval "exec $LOCKFD>\"$LOCKFILE\""; trap _no_more_locking EXIT; }
14
15 _prepare_locking
16
17 exlock_now() { _lock xn; } # obtain an exclusive lock immediately or fail
18 exlock() { _lock x; } # obtain an exclusive lock
19 shlock() { _lock s; } # obtain a shared lock
20 unlock() { _lock u; } # drop a lock
21
22 count=0
23 max_count=60
24
25 while (! exlock_now );
26 do
27 let wait_time=$RANDOM%30
28 error "Waiting ${wait_time} seconds due to concurrent ${1} command"
29 sleep ${wait_time}
30
31 count=$(expr ${count} + 1)
32
33 if [ ${count} -gt ${max_count} ]; then
34 echo "Max wait expired. Failing."
35 exit 1
36 fi
37 done
38
39 error "Executing command, lock is free for: ${@}"
40 "${@}"
41 unlock
42}
43
44_vbox_cmd() {
45 # Virtual box is a real pain. This function uses the locker function above to
46 # wrap up vboxmanage to prevent its stupid issues with concurrency.
47 cmd_lock vboxmanage ${@} ||
48 fail "Failed to execute locked command: vboxmange ${@}"
49}
050
=== added file 'functions/merge_templates'
--- functions/merge_templates 1970-01-01 00:00:00 +0000
+++ functions/merge_templates 2018-05-31 04:33:07 +0000
@@ -0,0 +1,53 @@
1#!/bin/bash
2# vi: ts=4 noexpandtab syntax=sh
3#
4# This is just like mk_template.sh, but differs in that it handles
5# an arbitrary number of templates being merged in.
6#
7# ARG1 - base template
8# ARG2 - final templates
9# ARG* - addin templates
10
11# This merges templates together
12merge_templates() {
13 local cur_dir=${PWD}
14 local args=(${@})
15 local main_template=${1}; args=("${args[@]:1}")
16 local new_template=${2}; args=("${args[@]:1}")
17 local addins=("${args[@]}")
18
19 if [ "${#addins[@]}" -ge 1 ]; then
20 ntmp_dir=$(mktemp -d template.XXXXX --tmpdir=${TMPDIR:-/tmp})
21 cd ${ntmp_dir}
22
23 # Split the base template "ADDIN_HERE"
24 awk '/ADDIN_HERE/{n++}{print >"template" n ".txt" }' \
25 ${main_template} ||
26 fail "failed to split template!"
27
28 # Combine the split template with the addin in the middle
29 cat template.txt \
30 ${addins[@]} \
31 template1.txt \
32 > ${new_template}
33
34 # Do some variable replacement
35 sed -e "s,ADDIN_HERE,# END Addins,g" \
36 -e "s,%%PPA%%,${PPA},g" \
37 -e "s,%%PROPOSED%%,${PROPOSED:-0},g" \
38 -i ${new_template} ||
39 fail "Unable to finalize template!"
40
41 else
42
43 sed -e "s,ADDIN_HERE,# END Addins,g" \
44 "${main_template}" > "${new_template}"
45
46 fi
47
48 # Remove the temp directory if it exists
49 [ -n "${ntmp_dir}" ] && rm -rf "${ntmp_dir}"
50
51 # Get back to where we started
52 cd ${cur_dir}
53}
054
=== added file 'functions/mk_template.sh'
--- functions/mk_template.sh 1970-01-01 00:00:00 +0000
+++ functions/mk_template.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,41 @@
1#!/bin/bash
2
3# This merges templates together
4merge_template() {
5 cur_dir=${PWD}
6 main_template=${1}
7 addin_template=${2}
8 new_template=${3}
9
10 if [ -n "${addin_template}" ]; then
11 ntmp_dir=$(mktemp -d template.XXXXX --tmpdir=${TMPDIR:-/tmp})
12 cd ${ntmp_dir}
13
14 # Split the base template "ADDIN_HERE"
15 awk '/ADDIN_HERE/{n++}{print >"template" n ".txt" }' \
16 ${main_template} ||
17 fail "failed to split template!"
18
19 # Combine the split template with the addin in the middle
20 cat template.txt \
21 ${addin_template} \
22 template1.txt \
23 > ${new_template}
24
25 # Do some variable replacement
26 sed -e "s,ADDIN_HERE,# END Addins,g" \
27 -e "s,%%PPA%%,${PPA},g" \
28 -e "s,%%PROPOSED%%,${PROPOSED:-0},g" \
29 -i ${new_template} ||
30 fail "Unable to finalize template!"
31
32 else
33 "${main_template}" "${new_template}"
34 fi
35
36 # Remove the temp directory if it exists
37 [ -n "${ntmp_dir}" ] && rm -rf "${ntmp_dir}"
38
39 # Get back to where we started
40 cd ${cur_dir}
41}
042
=== added file 'functions/retry'
--- functions/retry 1970-01-01 00:00:00 +0000
+++ functions/retry 2018-05-31 04:33:07 +0000
@@ -0,0 +1,16 @@
1# Code for retrying commands
2
3retry() {
4 local trycount=${1} sleep=${2}
5 shift; shift;
6 local i=0 smsg=" sleeping ${sleep}: $*" ret=0
7 for((i=0;i<${trycount};i++)); do
8 "$@" && return 0
9 ret=$?
10 [ $(($i+1)) -eq ${trycount} ] && smsg=""
11 debug 1 "Warning: cmd failed [try $(($i+1))/${trycount}].${smsg}"
12 sleep $sleep
13 done
14 return $ret
15}
16
017
=== added file 'generate-ubuntu-lists.sh'
--- generate-ubuntu-lists.sh 1970-01-01 00:00:00 +0000
+++ generate-ubuntu-lists.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,44 @@
1#!/bin/bash
2# Generate a list of Ubuntu releases
3
4final_d="${FIANL_D:-/srv/jenkins}"
5tmpd=$(mktemp -d)
6
7trap "rm -rf ${tmpd}" EXIT SIGINT
8
9# Get the regular info
10ubuntu-distro-info --supported \
11 > ${tmpd}/ubuntu-supported.txt
12
13ubuntu-distro-info --all \
14 > ${tmpd}/ubuntu-all.txt
15
16ubuntu-distro-info --unsupported \
17 > ${tmpd}/ubuntu-unsupported.txt
18
19ubuntu-distro-info --release --supported \
20 > ${tmpd}/ubuntu-versions.txt
21
22# Populate releases which may be missing
23for suite in vivid:15.04 wily:15.10 xenial:16.04;
24do
25 echo "${suite%%:*}" >> ${tmpd}/ubuntu-supported.txt
26 echo "${suite%%:*}" >> ${tmpd}/ubuntu-all.txt
27 echo "${suite##*:}" >> ${tmpd}/ubuntu-versions.txt
28done
29
30# Sort and make it pretty
31cat ${tmpd}/ubuntu-supported.txt \
32 | sort -r -u > ${final_d}/ubuntu-supported.txt
33
34cat ${tmpd}/ubuntu-all.txt \
35 | egrep -v warty \
36 | sort -r -u > ${final_d}/ubuntu-all.txt
37
38cat ${tmpd}/ubuntu-versions.txt \
39 | sed "s, ,-,g" \
40 | sort -r -u \
41 > ${final_d}/ubuntu-versions.txt
42
43sort -r -u ${tmpd}/ubuntu-unsupported.txt \
44 > ${final_d}/ubuntu-unsupported.txt
045
=== added file 'get_serial.sh'
--- get_serial.sh 1970-01-01 00:00:00 +0000
+++ get_serial.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,157 @@
1#!/bin/bash -xe
2#
3# Determine the build serial and place files into the build serial location
4# Also, handle the unlikely race condition in case multiple builders arrive
5# At the same point.
6# copies the files to their staging location
7# Prevent race conditions for populating the aggregate build directory
8#
9# OUTPUT:
10# - serial.txt file in ${WORKSPACE}
11# - build_properties (OR ${BUILD_PROPERTIES}) file in ${PWD}
12# - build-info.txt in ${base_d}/unpacked (or ${base_nd}/unpacked})
13# NOTE: see code for how base_d and base_nd are computed
14
15# Required options to even do a build
16DISTRO="${DISTRO:-$1}"
17WORKSPACE="${WORKSPACE:-$2}" # where is the workspace
18BUILD_ID="${BUILD_ID:-$3}" # build id
19
20# Convert hwe builds to regular for the sake of tooling
21btype="${4:-server}" # server or something else
22if [[ "${btype}" =~ hwe ]]; then
23 hwe_btype="${btype}"
24 btype="server"; BTYPE="server"
25fi
26
27# Support the legacy broken positional stuff. This should have been
28# done with environmental variables or flags
29test_build="${5:-0}" # test build?
30sandbox_build="${6:-0}" # should it be a sandbox build
31allow_existing="${7:-1}" # allow existing
32publish_image="${8:-0}" # publish the image
33proposed_build="${9:-0}" # build from proposed
34
35# Make this less confusing by allowing someone to use environmental
36# variables.
37# TODO: utlemming: convert this --flags
38BTYPE="${BTYPE:-$btype}"
39TEST_BUILD="${TEST_BUILD:-$test_build}"
40SANDBOX_BUILD="${SANDBOX_BUILD:-$sandbox_build}"
41PUBLISH_IMAGE="${PUBLISH_IMAGE:-$publish_image}"
42PROPOSED_BUILD="${PROPOSED_BUILD:-$proposed_build}"
43
44ROOT_D="${ROOT_D:-/srv/ec2-images}"
45base_d="${ROOT_D}/${DISTRO}"
46[ "${TEST_BUILD}" -eq 1 ] && base_d="${ROOT_D}/test_builds/${DISTRO}"
47[ "${SANDBOX_BUILD}" -eq 1 ] && base_d="${ROOT_D}/sandbox/${DISTRO}" && TEST_BUILD=0
48[ "${BTYPE}" = "desktop" ] && base_d="${ROOT_D}/desktop/${DISTRO}"
49[ "${PROPOSED_BUILD}" -eq 1 ] && base_d="${ROOT_D}/proposed/${DISTRO}" &&
50 TEST_BUILD=0 && SANDBOX_BUILD=0
51
52let wait_time=$RANDOM%50
53sleep $wait_time # Make build collisions a bit harder
54
55make_hwe_meta() {
56 # Create a sub build-info.txt for HWE builds
57 serial="${1##*/}"
58 hwe_unpacked="${base_d}/${serial}/${hwe_btype//$BTYPE-/}/unpacked"
59 if [ -n "${hwe_btype}" ]; then
60 [ -d "${hwe_unpacked}" ] || mkdir -p "${hwe_unpacked}"
61 cat << EOF > "${hwe_unpacked}/build-info.txt"
62serial=${serial}
63orig_prefix=${DISTRO}-${hwe_btype}-cloudimg
64suite=${DISTRO}
65build_name=${hwe_btype}
66EOF
67 fi
68}
69
70make_meta() {
71 # Write the property file for publishing. This used
72 # to write trigger the EC2 publishing job
73 serial=${1##*/}
74 cat << EOM > "${BUILD_PROPERTIES:-$WORKSPACE/build_properties}"
75BUILD_TYPE=${BTYPE}
76SERIAL=${serial}
77SUITE=${DISTRO}
78TEST_BUILD=${TEST_BUILD}
79SANDBOX_BUILD=${SANDBOX_BUILD}
80PUBLISH_IMAGE=${PUBLISH_IMAGE}
81ALLOW_EXISTING=${ALLOW_EXISTING}
82PROPOSED_BUILD=${PROPOSED_BUILD}
83EOM
84
85 # Write the build-info.txt file. This is used in
86 # the publishing process
87 [ -d "${1}/unpacked" ] || mkdir -p "${1}/unpacked"
88 cat << EOF > "${1}/unpacked/build-info.txt"
89serial=${serial}
90orig_prefix=${DISTRO}-${BTYPE}-cloudimg
91suite=${DISTRO}
92build_name=${BTYPE}
93EOF
94 make_hwe_meta ${serial}
95 exit 0
96}
97
98$(stat /tmp/${DISTRO}-${BUILD_ID} > /dev/null 2>&1) && {
99 echo "Another builder is/has reserved this part of the build. Deferring..."
100 while [ -z "${destdir}" ]
101 do
102 sleep 5
103 finaldir=""
104
105 [ -e "${WORKSPACE}/serial.txt" ] && {
106 read serial < "${WORKSPACE}/serial.txt"
107 destdir="${base_d}/${serial}"
108 }
109
110 while read destdir
111 do
112 echo "Candidate serial found: ${destdir##*/}"
113 finaldir="${destdir}"
114 done < /tmp/${DISTRO}-${BUILD_ID}
115
116 if [ -n "${finaldir}" ]; then
117 echo "Aggregation directory reported as ${finaldir}"
118 echo "${finaldir##*/}" > "${WORKSPACE}/serial.txt"
119 make_hwe_meta "${finaldir##*/}"
120 exit 0
121 else
122 echo "destdir is not defined!" && exit 10
123 fi
124
125 done
126}
127
128# if we get here, then know that the build dir hasn't been created yet
129touch /tmp/${DISTRO}-$BUILD_ID
130test_base_d="${base_d}/$(date +%Y%m%d)"
131
132make_and_write() {
133 serial="${1##*/}"
134 echo "Creating aggregation directory ${1}"
135 echo "${serial}" > "${WORKSPACE}/serial.txt"
136 mkdir -p "${1}" &&
137 echo "${1}" >> /tmp/${DISTRO}-$BUILD_ID ||
138 exit 10
139
140 # Copy stuff to where it should go
141 make_meta "${1}"
142}
143
144if [ ! -d "${test_base_d}" ]; then
145 make_and_write "${test_base_d}"
146else
147 for bs in {1..30}
148 do
149 base_nd="${test_base_d}.${bs}"
150 serial="${base_nd##*/}"
151 echo "Checking on directory ${base_nd}"
152 [ ! -d "${base_nd}" ] && make_and_write "${base_nd}"
153 make_hwe_meta "${serial}"
154 done
155fi
156
157exit 0
0158
=== added directory 'jenkins'
=== added file 'jenkins/CloudImages_Azure.sh'
--- jenkins/CloudImages_Azure.sh 1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Azure.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,162 @@
1#!/bin/bash
2fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
3
4# set default umask
5umask 022
6
7# Pre-setup: Read the build properties from the previous build
8# and discard what we don't want
9[ -e build.info ] && cp build.info build_properties
10source build_properties
11
12
13# Load up some libraries
14my_dir="$( cd "$( dirname "$0" )" && pwd )"
15base_dir=$(dirname ${my_dir})
16source "${base_dir}/functions/locker"
17source "${base_dir}/functions/common"
18source "${base_dir}/functions/retry"
19
20debug() { echo "${@}"; }
21fail() { echo "${@}" 2>&1; exit 1;}
22
23
24# Shuffle stuff around"
25[ -e build_properties ] && mv build_properties parent_build_properties
26[ -e build.log ] && mv build.log parent_build.log
27[ -e "${SUITE}-build.sh" ] && rm "${SUITE}-build.sh"
28
29echo "-------------------"
30echo " Distro: ${SUITE}"
31echo " Serial: ${SERIAL}"
32echo " Type: ${BUILD_TYPE}"
33echo "-------------------"
34
35set -x
36# Variables
37case ${SUITE} in
38 precise|trusty|wily|xenial)
39 disk_name="${SUITE}-server-cloudimg-amd64-disk1.img"
40 raw_name="${PWD}/${SUITE}-server-cloudimg-amd64-disk1.raw"
41 vhd_name="${PWD}/${SUITE}-server-cloudimg-amd64-disk1.vhd"
42 ;;
43 *)
44 disk_name="${SUITE}-server-cloudimg-amd64.img"
45 raw_name="${PWD}/${SUITE}-server-cloudimg-amd64.raw"
46 vhd_name="${PWD}/${SUITE}-server-cloudimg-amd64.vhd"
47 ;;
48esac
49disk_root="${DISK_ROOT:-/srv/ec2-images/${SUITE}/${SERIAL}}"
50raw_disk="${PWD}/results.raw"
51launch_config="${PWD}/launch_config.sh"
52register_config="${PWD}/register_config.sh"
53pkg_tar="${PWD}/pkg.tar"
54pkg_tar_d="${PKG_TAR_D:-${kvm_builder}/azure_pkgs}"
55proposed="${PROPOSED:-false}"
56vhd_size=${VHD_SIZE:-30}
57
58# Convert image to a RAW disk to work with. The raw image is used
59# to populate the daily VHD in Azure
60debug "Converting QCow2 to Raw Disk"
61qemu-img \
62 convert -O raw \
63 "${disk_root}/${disk_name}" \
64 "${raw_name}" &&
65 debug "Converted QCow2 to Raw disk for manipulation" ||
66 fail "Failed to convert QCow2 to Raw disk"
67
68config_opts=(${CONFIG_OPTS})
69config_opts+=(
70 --version $(${kvm_builder}/ubuntu-adj2version ${SUITE})
71 --serial "${SERIAL}"
72 --out "${launch_config}"
73 )
74
75# Turns on building from proposed
76[ "${proposed}" == "true" ] &&
77 config_opts+=(--proposed)
78
79# Setup the configuration
80${kvm_builder}/azure_config.sh \
81 ${config_opts[@]} ||
82 fail "Failed to configure instance runtime"
83
84# Full disk populate for 12.04
85root_size=2
86if [ "${SUITE}" == "precise" ]; then
87 root_size=29
88 truncate -s 29G "${raw_name}.pre-vhd" &&
89 debug "Resized 12.04 image to full size" ||
90 fail "Failed to resize 12.04 to full size"
91fi
92
93case ${SUITE} in
94 precise|trusty|xenial)
95 builder_img=/srv/builder/images/precise-builder-latest.img
96 ;;
97 *)
98 builder_img=/srv/builder/images/artful-builder-latest.img
99 ;;
100esac
101
102# Launch KVM to do the work
103${kvm_builder}/launch_kvm.sh \
104 --id ${BUILD_ID} \
105 --user-data "${launch_config}" \
106 --cloud-config "${kvm_builder}/config/cloud-azure.cfg" \
107 --extra-disk "${raw_name}" \
108 --raw-disk "${WORKSPACE}/${SUITE}-output.raw" \
109 --raw-size ${root_size} \
110 --img-url ${builder_img} ||
111 fail "KVM instance failed to build image."
112
113rm "${WORKSPACE}/${SUITE}-output.raw"
114
115
116# Copy the raw image to make it ready for VHD production
117cp --sparse=always "${raw_name}" "${raw_name}.pre-vhd" &&
118 debug "Copied raw image VHD production" ||
119 fail "Failed to copy raw image to ${raw_name}.pre-vhd"
120
121# Resize the copied RAW image
122debug "Truncating image to ${vhd_size}G"
123truncate -s "${vhd_size}G" "${raw_name}.pre-vhd" &&
124 debug "Truncated image at ${vhd_size}G" ||
125 fail "Failed to truncate disk image"
126
127# Convert to VHD first, step 1 of cheap hack
128# This is a cheap hack...half the time the next command
129# will fail with "VERR_INVALID_PARAMETER", so this is the,
130# er, workaround
131debug "Converting to VHD"
132_vbox_cmd convertfromraw --format VHD \
133 "${raw_name}.pre-vhd" \
134 "${vhd_name}.pre" &&
135 debug "Converted raw disk to VHD" ||
136 fail "Failed to convert raw image to VHD"
137
138# Clone the disk to fixed, VHD for Azure
139debug "Converting to VHD format from raw..."
140debug ".....this might take a while...."
141_vbox_cmd clonehd --format VHD --variant Fixed \
142 "${vhd_name}.pre" \
143 "${vhd_name}" &&
144 debug "Converted raw disk to VHD format using VirtualBox" ||
145 fail "Failed to convert raw image to VHD disk!"
146
147# Remove the unneeded files
148rm "${vhd_name}.pre" "${raw_name}.pre-vhd"
149
150debug "Image Characteristics:"
151_vbox_cmd showhdinfo "${vhd_name}"
152
153
154debug "Raw image converted to VHD"
155
156# Compress the VHD image with pbzip2 (currently disabled)
157#debug "Archiving the VHD image"
158#pbzip2 -f "${vhd_name}" &&
159# debug "Created archive of the VHD image" ||
160# fail "Failed to compress image"
161
162exit 0
0163
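
The conversion pipeline in CloudImages_Azure.sh (qcow2 -> raw -> dynamic VHD -> fixed VHD) can be reproduced by hand when debugging a failed build. Below is a minimal sketch of the same qemu-img/VBoxManage steps; the input path and target size are placeholders, and the script above invokes VBoxManage through its _vbox_cmd wrapper rather than directly.

#!/bin/bash
# Sketch: reproduce the Azure VHD conversion with placeholder paths.
set -e
src="xenial-server-cloudimg-amd64-disk1.img"   # qcow2 input (placeholder)
raw="${src%.img}.raw"
vhd="${src%.img}.vhd"

qemu-img convert -O raw "${src}" "${raw}"                      # qcow2 -> raw
truncate -s 30G "${raw}"                                       # pad to the final VHD size
VBoxManage convertfromraw --format VHD "${raw}" "${vhd}.pre"   # raw -> dynamic VHD
VBoxManage clonehd --format VHD --variant Fixed \
    "${vhd}.pre" "${vhd}"                                      # dynamic -> fixed VHD for Azure
VBoxManage showhdinfo "${vhd}"
rm "${vhd}.pre" "${raw}"
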
=== added file 'jenkins/CloudImages_Base.sh'
--- jenkins/CloudImages_Base.sh 1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Base.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,96 @@
1#!/bin/bash
2
3# Set default umask
4umask 022
5
6DISTRO=${DISTRO:-$SUITE}
7DISTRO=${DISTRO:?Must define distro}
8build_config="${PWD}/${DISTRO}-build.sh"
9
10# Read in the common functions
11my_dir="$( cd "$( dirname "$0" )" && pwd )"
12base_dir=$(dirname ${my_dir})
13source "${base_dir}/functions/locker"
14source "${base_dir}/functions/common"
15source "${base_dir}/functions/retry"
16source "${my_dir}/build_lib.sh"
17select_build_config
18
19# Only block for serial if serial is unknown
20[ -z "${SERIAL}" ] && {
21 # Get the serial number
22 retry 3 10 \
23 "${base_dir}/get_serial.sh" \
24 "${DISTRO}" "${WORKSPACE}" "${BUILD_ID}" "${BTYPE}" 0 0 1 1 ||
25 fail "Failed to get serial for this build"
26
27    # Read back the serial number
28 read SERIAL < serial.txt
29 [ -z ${SERIAL} ] && echo "NO SERIAL" && exit 10
30}
31
32# Create the configurations
33cmd=("${base_dir}/builder_config.sh"
34 --distro "${DISTRO}"
35 --build-type "${BTYPE}"
36 --arch "${ARCH_TYPE}"
37 --template ${base_dir}/templates/img-build.tmpl
38 --serial "${SERIAL}"
39 --out "${build_config}"
40)
41
42# Allow building from proposed
43[ "${PROPOSED_BUILD:-0}" -eq 1 ] && cmd+=("--proposed")
44[ "${USE_BUILDDS:-0}" -eq 1 ] && cmd+=("--bzr-automated-builds lp:~ubuntu-on-ec2/vmbuilder/automated-ec2-builds-buildd")
45[ -n "${BZR_AUTOMATED_EC2}" ] && cmd+=("--bzr-automated-builds ${BZR_AUTOMATED_EC2}")
46[ -n "${BZR_PUBSCRIPTS}" ] && cmd+=("--bzr-pubscripts ${BZR_PUBSCRIPTS}")
47[ -n "${BZR_LIVEBUILD}" ] && cmd+=("--bzr-livebuild ${BZR_LIVEBUILD}")
48[ -n "${BZR_VMBUILDER}" ] && cmd+=("--bzr-vmbuilder ${BZR_VMBUILDER}")
49
50# Do the build
51${cmd[@]} || fail "Failed to generate the instance configuration"
52unset cmd
53
54# For ARM builds, stop after generating the configuration unless BUILD_ARM is set
55if [[ "${ARCH_TYPE}" =~ (arm|aarch64|arm64) ]]; then
56 echo "This is an ARM build. ARM rules will apply"
57 [ "${BUILD_ARM}" -eq 0 ] && exit 0
58fi
59
60# Launch the builder
61# Retry building the image twice, waiting five
62# minutes between attempts. This should absorb most
63# failures caused by bad mirrors.
64export MAX_CYCLES=2160
65retry 2 300 \
66 "${base_dir}/launch_kvm.sh" \
67 --id "${BUILD_ID}" \
68 --user-data "${build_config}" \
69 --cloud-config "${base_dir}/config/${cloud_init_cfg}" \
70 --img-url "${BUILDER_CLOUD_IMAGE}" \
71 --raw-disk "${WORKSPACE}/${DISTRO}.raw" \
72 --raw-size 20 ||
73 fail "KVM instance failed"
74
75tar -xvvf "${WORKSPACE}/${DISTRO}.raw" ||
76 fail "Result tar failed to unpack"
77
78rm "${WORKSPACE}/${DISTRO}.raw" ||
79 fail "Failed to remove unnecessary file"
80
81# Put the bits in place
82"${base_dir}/copy_to_final.sh" \
83 "${DISTRO}" \
84 "${WORKSPACE}" \
85 "${SERIAL}" \
86 "${BTYPE}" \
87 "${TEST_BUILD}" \
88 "${SANDBOX_BUILD}" \
89 "${PROPOSED_BUILD}" ||
90 fail "Failed to place final files to destination"
91
92# Copy the build properties into the workspace. This is set by get_serial.sh
93[ "${BUILD_PROPERTIES}" != "${WORKSPACE}/build_properties" ] &&
94 cp ${BUILD_PROPERTIES} ${WORKSPACE}/build_properties
95
96echo "ARCH=${ARCH_TYPE}" >> build_properties
097
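
CloudImages_Base.sh leans on the retry helper sourced from functions/retry, whose body is not shown in this hunk. For readers following the control flow, a plausible standalone equivalent is sketched below; the actual helper in the branch may differ.

#!/bin/bash
# Sketch: retry <attempts> <delay_seconds> <command ...>
# Runs the command until it succeeds or the attempt budget is spent.
retry() {
    local attempts=$1 delay=$2 try
    shift 2
    for (( try = 1; try <= attempts; try++ )); do
        "$@" && return 0
        [ "${try}" -lt "${attempts}" ] && sleep "${delay}"
    done
    return 1
}

# Usage mirroring the serial lookup above:
#   retry 3 10 ./get_serial.sh "${DISTRO}" "${WORKSPACE}" "${BUILD_ID}" "${BTYPE}" 0 0 1 1
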
=== added file 'jenkins/CloudImages_Base_Release_Delta.sh'
--- jenkins/CloudImages_Base_Release_Delta.sh 1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Base_Release_Delta.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,255 @@
1#!/bin/bash -x
2
3# Set default umask
4umask 022
5
6# Skip promotion if this file exists
7HOLIDAY_FILE=/srv/jenkins/HOLIDAY
8
9# Write the build properties file
10cat << EOF > "${WORKSPACE}/build_properties"
11SUITE=${SUITE}
12STREAM=${STREAM}
13SERIAL=${SERIAL}
14BUILD_TYPE=${BUILD_TYPE}
15
16EOF
17
18# Write the environment variables to the run file
19env > ${SUITE}.run
20
21fail() { echo "$@"; exit 1;}
22dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
23
24arches=(i386 amd64 armel armhf arm64 ppc64el)
25exec_c="/srv/builder/vmbuilder/bin/cronrun"
26rel_base="/srv/ec2-images/releases/${SUITE}/release"
27rel_link=$(readlink ${rel_base})
28
29[ "${BUILD_TYPE}" = "desktop" ] &&
30 echo "Not valid for desktop builds" &&
31 exit 0
32
33# Find the existing manifest file
34old_manifest=$(find -L ${rel_base} -maxdepth 1 -iname '*amd64.manifest') ||
35 echo "Unable to find release manifest file"
36
37# Find the new manifest file
38new_manifest_d="/srv/ec2-images/${SUITE}/${SERIAL}"
39[ "${TEST_BUILD:-0}" -eq 1 ] && new_manifest_d="/srv/ec2-images/test_builds/${SUITE}/${SERIAL}"
40[ "${SANDBOX_BUILD:-0}" -eq 1 ] && new_manifest_d="/srv/ec2-images/sandbox/${SUITE}/${SERIAL}"
41new_manifest=$(find ${new_manifest_d} -maxdepth 1 -iname '*amd64.manifest') ||
42 fail "Unable to find new manifest file"
43
44# Find the previous serial if there was one
45previous_serial=$(find /srv/ec2-images/${SUITE}/ -maxdepth 1 -type d |\
46 awk -F\/ '{print$NF}' | sort -rn | grep "." | grep -v "${SERIAL}" | head -n1) ||
47 echo "Unable to find prior daily manifest"
48
49previous_manifest=${new_manifest//$SERIAL/$previous_serial}
50
51# Generate the pure package diffs
52for arch in "${arches[@]}"
53do
54 nm=${new_manifest//amd64/$arch}
55 om=${old_manifest//amd64/$arch}
56 pm=${previous_manifest/amd64/$arch}
57
58 [ -e "${nm}" ] &&
59 cp "${nm}" "${WORKSPACE}/manifest-${arch}-daily-${SERIAL}.txt"
60
61 # Generate the diff from daily to release
62 if [ -e "${nm}" -a -e "${om}" ]; then
63 release_diff=${new_manifest##*/}
64 release_diff=${release_diff//.manifest/-$rel_link-to-daily_manifest.diff}
65 release_diff=${release_diff//amd64/$arch}
66 diff -u ${om} ${nm} > "${WORKSPACE}/${release_diff}"
67 cp ${om} "${WORKSPACE}/manifest-${arch}-release.txt"
68 fi
69
70 # Generate the diff from daily to old daily
71 if [ -e "${nm}" -a -e "${pm}" ]; then
72 daily_diff=${new_manifest##*/}
73 daily_diff=${daily_diff//.manifest/-$previous_serial-to-$SERIAL-manifest.diff}
74 daily_diff=${daily_diff//amd64/$arch}
75 diff -u ${pm} ${nm} > "${WORKSPACE}/${daily_diff}"
76 cp ${pm} "${WORKSPACE}/manifest-${arch}-previous_daily-${previous_serial}.txt"
77 fi
78done
79
80# Determine if there is a different version of a particular package.
81# If so, write out the package information to the specified trigger file
82package_differences() {
83 # If a pattern is used, it should match a single entry in the manifest
84 package_pattern=$1
85 trigger_file=$2
86
87 echo "Checking for differences in ${package_pattern}"
88
89 v1="$(awk "/${package_pattern}/ {print\$NF}" ${old_manifest})"
90 v2="$(awk "/${package_pattern}/ {print\$NF}" ${new_manifest})"
91
92 if [ "x${v1}" != "x${v2}" ]; then
93 echo " Package changed old:${v1}, new:${v2}"
94 cat << PKGDIFF >> ${trigger_file}
95'${package_pattern}':
96- old: '${v1}'
97- new: '${v2}'
98
99PKGDIFF
100 else
101 echo " No difference old:${v1}, new:${v2}"
102 fi
103}
104
105# Set packages to trigger an automated promotion in this array
106# This list of packages is controlled through application of
107# lp:~cloudware/cpc-core/+git/cpc_policy:policies/0003_automated_daily_promotion.rst
108# trigger_set MUST NOT be modified without accompanying policy doc change
109declare -a trigger_set
110# This is an array of package names where the string is any awk-friendly
111# pattern supported by the expression in package_differences(), but it must
112# only match a single package
113#trigger_set=('example_package' 'example_package2-*')
114trigger_set=('pollinate')
115
116# Append the kernel package to the trigger_set array
117if dist_ge ${SUITE} quantal; then
118 trigger_set[${#trigger_set[@]}]='linux-image.*generic'
119else
120 trigger_set[${#trigger_set[@]}]='linux-image-virtual'
121fi
122
123# For legacy reasons the jenkins jobs use a "kernel" trigger file
124# for automated build promotion. All package changes will
125# use this single trigger file until the need arises for more
126# granularity.
127trigger_file="${WORKSPACE}/${SUITE}-kernel-trigger"
128if [ -e "${trigger_file}" ] ; then
129 echo "Cleaning up old trigger file in workspace"
130 rm --verbose "${trigger_file}"
131fi
132
133# Check all packages in the trigger_set array
134for pkg in ${trigger_set[@]} ; do
135 package_differences "${pkg}" "${trigger_file}"
136done
137
138# If the trigger file exists, determine if it should be pushed to the
139# build trigger directory for action
140if [ -e "${trigger_file}" ] ; then
141 if [ ! -e "${rel_base}" ]; then
142 echo "${SUITE} not released, not triggering"
143 rm --verbose "${trigger_file}"
144 elif [ ! -f ${HOLIDAY_FILE} ]; then
145 echo "Creating trigger file with contents:"
146 cat "${trigger_file}"
147 cp --verbose "${trigger_file}" \
148 "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger"
149 else
150 echo "Not creating trigger, ${HOLIDAY_FILE} found"
151 if [ -e "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger" ]; then
152 echo "Removing existing trigger from old build with same parent ID."
153 rm --verbose "${TRIGGER_LOCATION:-/srv/builder/triggers/kernel}/${PARENT_BUILDER_ID}.trigger"
154 fi
155 fi
156else
157 echo "No trigger file found"
158fi
159
160# Copy the diffs into the build's unpacked directory
161cp ${WORKSPACE}/*.diff ${new_manifest_d}/unpacked
162
163# Generate the mfdiff between the dailies
164[ -e "${previous_manifest}" -a -e "${new_manifest}" ] &&
165 ${exec_c} mfdiff amd64 ${SUITE} ${previous_manifest} ${new_manifest} >\
166 "${WORKSPACE}/${SUITE}-daily.changelog"
167
168# Generate the diff between daily and the released image
169[ -e "${old_manifest}" -a -e "${new_manifest}" ] &&
170 ${exec_c} mfdiff amd64 ${SUITE} ${old_manifest} ${new_manifest} >\
171 "${WORKSPACE}/${SUITE}-${rel_link}-to-daily.changelog"
172
173# Copy the changelogs into the build's unpacked directory
174cp ${WORKSPACE}/*.changelog ${new_manifest_d}/unpacked
175
176# The rest of the operations are for released images only
177[ ! -e "${rel_base}" ] &&
178 echo "No current release, aborting comparison" &&
179 exit 0
180
181# Tar up the deltas
182tar -C ${WORKSPACE} -jcvf "${WORKSPACE}/${SUITE}-${SERIAL}.tar.bz2" \
183 *.changelog \
184 *.txt \
185 *.diff ||
186 fail "Failed to create tarball"
187
188# Start the email report work
189changed_pkgs=$(grep '=>' ${SUITE}-${rel_link}-to-daily.changelog | \
190 sed -e 's,====,,g' -e 's,^, *,g' | sort -k2)
191
192# Generate the email template
193VER=$(${kvm}/ubuntu-adj2version ${SUITE})
194
195case ${VER} in
196 *8.04*) VER="${VER} LTS";
197 CODENAME="Hardy Heron";;
198 *10.04*) VER="${VER} LTS";
199 CODENAME="Lucid Lynx";;
200 *11.04*) CODENAME="Natty Narwhal";;
201 *11.10*) CODENAME="Oneiric Ocelot";;
202 *12.04*) VER="${VER} LTS";
203 CODENAME="Precise Pangolin";;
204    *12.10*) CODENAME="Quantal Quetzal";;
205 *13.04*) CODENAME="Raring Ringtail";;
206 *13.10*) CODENAME="Saucy Salamander";;
207 *14.04*) VER="${VER} LTS";
208 CODENAME="Trusty Tahr";;
209 *14.10*) CODENAME="Utopic Unicorn";;
210 *15.04*) CODENAME="Vivid Vervet";;
211 *15.10*) CODENAME="Wily Werewolf";;
212esac
213
214email_name="${WORKSPACE}/${SUITE}-release_announcement.email"
215cat << EOF > "${email_name}"
216SUBJECT: Refreshed Cloud Images of ${VER} (${CODENAME}) [${SERIAL}]
217TO: ec2ubuntu@googlegroups.com; ubuntu-cloud@lists.ubuntu.com; ubuntu-cloud-announce@lists.ubuntu.com
218
219A new release of the Ubuntu Cloud Images for stable Ubuntu release ${VER} (${CODENAME}) is available at [1]. These new images supersede the existing images [2]. Images are available for download or for immediate use on EC2 via published AMI ids. Users who wish to update their existing installations can do so with:
220 'sudo apt-get update && sudo apt-get dist-upgrade && sudo reboot'.
221
222EOF
223
224if [ "${old_linux_kernel}" != "${new_linux_kernel}" ]; then
225 cat << EOF >> "${email_name}"
226The Linux kernel was updated from ${old_linux_kernel} [3] to ${new_linux_kernel} [4]
227
228EOF
229fi
230
231cat << EOF >> "${email_name}"
232The following packages have been updated. Please see the full changelogs
233for a complete listing of changes:
234${changed_pkgs}
235
236
237The following is a complete changelog for this image.
238$(cat ${SUITE}-${rel_link}-to-daily.changelog)
239
240--
241[1] http://cloud-images.ubuntu.com/releases/${SUITE}/release-${SERIAL}/
242[2] http://cloud-images.ubuntu.com/releases/${SUITE}/${rel_link}/
243EOF
244
245if [ "${old_linux_kernel}" != "${new_linux_kernel}" ]; then
246cat << EOF >> "${email_name}"
247[3] http://changelogs.ubuntu.com/changelogs/pool/main/l/linux/linux_${old_linux_kernel}/changelog
248[4] http://changelogs.ubuntu.com/changelogs/pool/main/l/linux/linux_${new_linux_kernel}/changelog
249EOF
250fi
251
252# Create release notes
253lnc=$(wc -l ${email_name} | awk '{print$1}')
254tail -n `expr $lnc - 3` ${email_name} > "${WORKSPACE}/release_notes.txt"
255cp ${WORKSPACE}/release_notes.txt ${new_manifest_d}/unpacked
0256
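
The package_differences() helper in CloudImages_Base_Release_Delta.sh compares one package's version between two manifests (lines of "<package> <version>"). A self-contained sketch of the same check, runnable outside Jenkins with placeholder arguments:

#!/bin/bash
# Sketch: report whether a package's version differs between two manifests.
# Usage: ./manifest-diff.sh <old_manifest> <new_manifest> <awk_pattern>
old_manifest=$1
new_manifest=$2
pattern=$3    # must match exactly one manifest line, e.g. 'linux-image.*generic'

v_old=$(awk "/${pattern}/ {print \$NF}" "${old_manifest}")
v_new=$(awk "/${pattern}/ {print \$NF}" "${new_manifest}")

if [ "${v_old}" != "${v_new}" ]; then
    echo "${pattern}: changed old=${v_old} new=${v_new}"
else
    echo "${pattern}: unchanged (${v_new})"
fi
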
=== added file 'jenkins/CloudImages_Juju.sh'
--- jenkins/CloudImages_Juju.sh 1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Juju.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,253 @@
1#!/bin/bash
2
3# Set default umask
4umask 022
5
6# Read in the common files
7my_name=$(readlink -f ${0})
8my_dir=$(dirname ${my_name})
9my_pdir=$(dirname ${my_dir})
10
11# Source in the common functions
12source "${my_pdir}/functions/common"
13source "${my_pdir}/functions/retry"
14source "${my_pdir}/functions/locker"
15export HOME=${WORKSPACE}
16
17# needed for building on Jenkins
18[ -e "build_properties" ] && source build_properties
19
20# Copy the target disk images
21ARCH_TYPE=${ARCH_TYPE:-$ARCH}
22disk_orig="${SUITE}-server-cloudimg-${ARCH_TYPE}-disk1.img"
23disk_cp="${disk_orig//$ARCH_TYPE/$ARCH_TYPE-juju-vagrant}"
24disk_root="${SRV_D:-/srv/ec2-images}/${SUITE}/${SERIAL:-current}"
25disk_working="${WORKSPACE}/${disk_cp}"
26final_disk="${WORKSPACE}/box-disk1.vdi"
27final_location="${OUTPUT_D:-/srv/ec2-images}/vagrant/${SUITE}/${SERIAL}"
28box_name="${disk_working//.img/.box}"
29raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
30build_host_suite=$(lsb_release -c -s)
31
32jenkins_build() {
33 [ -e "build_properties" ] &&
34 source build_properties ||
35 fail "Failed to read build_properties. I don't know what I'm doing!"
36
37 # Bail if something isn't right
38 SUITE=${SUITE:?Suite must be defined}
39 SERIAL=${SERIAL:?Serial must be defined}
40
41 cp "${disk_root}/${disk_orig}" "${disk_working}" ||
42 fail "Unable to copy ${disk_orig} from ${disk_root}"
43
44 qemu-img resize ${disk_working} 40G
45
46    # Launch KVM to do the work
47 ${my_pdir}/launch_kvm.sh \
48 --id "${ARCH_TYPE}-${BUILD_ID}" \
49 --user-data "${my_pdir}/config/cloud-vps.cfg" \
50 --cloud-config "${my_pdir}/templates/img-juju.tmpl" \
51 --extra-disk "${disk_working}" \
52 --disk-gb 1 \
53 --raw-disk "${raw_f}" \
54 --raw-size 1 \
55 --img-url /srv/builder/images/precise-builder-latest.img ||
56 fail "KVM instance failed to build image."
57}
58
59# Assume that we're building in Jenkins unless otherwise stated.
60# This lets the standalone builder be used for testing while still
61# running the steps below.
62[ "${LOCAL_BUILD:-0}" -eq 1 ] || jenkins_build
63
64# Convert the qcow2 image to raw, then to VDI.
65qemu-img convert -O raw ${disk_working} ${disk_working//.img/.raw}
66
67_vbox_cmd convertfromraw \
68 --format vdi \
69 ${disk_working//.img/.raw} ${final_disk}
70
71# Create the VM
72vmname="ubuntu-cloudimg-${SUITE}-juju-vagrant-${ARCH_TYPE}"
73_vbox_cmd modifyhd --compact ${final_disk}
74
75dist_v="Ubuntu"
76[ "${ARCH_TYPE}" = "amd64" ] && dist_v="Ubuntu_64"
77_vbox_cmd createvm \
78 --name ${vmname} \
79 --ostype ${dist_v} \
80 --register
81
82_vbox_cmd modifyvm ${vmname} \
83 --memory 2048 \
84 --boot1 disk \
85 --boot2 none \
86 --boot3 none \
87 --boot4 none \
88 --vram 12 \
89 --pae off \
90 --acpi on \
91 --ioapic on \
92 --rtcuseutc on \
93 --bioslogodisplaytime 0 \
94 --nic1 nat \
95 --nictype1 virtio
96
97if [ "${ARCH_TYPE}" = "i386" ]; then
98 _vbox_cmd modifyvm ${vmname} \
99 --ioapic off \
100 --pae on
101fi
102
103
104_vbox_cmd modifyvm ${vmname} --natpf1 "guestssh,tcp,,2222,,22"
105
106storage_cmd=(
107 _vbox_cmd storagectl "${vmname}"
108 --name "SATAController"
109 --add sata
110 --controller IntelAhci
111 --hostiocache on
112 )
113
114if [ "$(lsb_release -r -s | sed 's/\.//')" -lt 1404 ]; then
115 storage_cmd+=(--sataportcount 1)
116else
117 storage_cmd+=(--portcount 1)
118fi
119
120${storage_cmd[@]}
121
122_vbox_cmd storageattach ${vmname} \
123 --storagectl "SATAController" \
124 --port 0 \
125 --device 0 \
126 --type hdd \
127 --medium ${final_disk}
128
129# Set extra-data
130_vbox_cmd setextradata ${vmname} installdate ${SERIAL}
131_vbox_cmd setextradata ${vmname} supported false
132
133# Set the Guest information to get rid of error message
134[ -e vagrant_image.pkgs ] && {
135
136 vbox_version=""
137 while read -r line
138 do
139 line=( $(echo ${line}) )
140 [[ ${line[0]} =~ virtualbox-guest-utils ]] && vbox_version=${line[1]}
141 done < vagrant_image.pkgs
142 debug "Guest Additions version is ${vbox_version}"
143
144 # Set the revision to some arbitrary value
145 _vbox_cmd guestproperty set ${vmname} \
146 "/VirtualBox/GuestAdd/Revision" '8000'
147
148 # Set the Ubuntu packaged version correctly
149 _vbox_cmd guestproperty set ${vmname} \
150 "/VirtualBox/GuestAdd/VersionExt" \
151 "${vbox_version//-dfsg-*/_Ubuntu}"
152
153 # Set the version string appropriately
154 _vbox_cmd guestproperty set ${vmname} \
155 "/VirtualBox/GuestAdd/Version" \
156 "${vbox_version//-dfsg-*/}"
157}
158
159mkdir ${WORKSPACE}/box
160_vbox_cmd export ${vmname} --output ${WORKSPACE}/box/box.ovf
161
162# Create the Vagrant file
163#macaddr="02:$(openssl rand -hex 5)"
164macaddr=$(awk '-F"' '/<Adapter slot="0" enabled="true"/ {print$6}' ${WORKSPACE}/box/box.ovf)
165cat << EOF > ${WORKSPACE}/box/Vagrantfile
166\$script = <<SCRIPT
167bzr branch lp:jujuredirector /tmp/jujuredir
168
169if ! grep precise /etc/lsb-release > /dev/null; then
170 cat << EOM > "/etc/apt/apt.conf.d/90proxy"
171Acquire::http::Proxy "http://10.0.3.1:8000";
172EOM
173
174 for series in precise trusty; do
175 version=\$(grep \$series /usr/share/distro-info/ubuntu.csv | cut -d, -f1 | cut -d' ' -f1)
176 expected_filename=/var/cache/lxc/cloud-\${series}/ubuntu-\${version}-server-cloudimg-${ARCH_TYPE}-root.tar.gz
177 if [ ! -e \$expected_filename ]; then
178 mkdir -p "/var/cache/lxc/cloud-\${series}"
179 curl -o "\$expected_filename" \
180 http://cloud-images.ubuntu.com/releases/\${series}/release/ubuntu-\${version}-server-cloudimg-${ARCH_TYPE}-root.tar.gz
181 fi
182 done
183
184 # Set up squid in the LXC template
185 for lxc_template in \$(ls /var/cache/lxc/cloud-*/*-root.tar.gz); do
186 gunzip "\$lxc_template"
187 unwrapped_name=\$(dirname "\$lxc_template")/\$(basename "\$lxc_template" .gz)
188 mkdir -p etc/apt/apt.conf.d
189 echo 'Acquire::http::Proxy "http://10.0.3.1:8000";' > etc/apt/apt.conf.d/90proxy
190 tar rf "\$unwrapped_name" etc/apt/apt.conf.d/90proxy
191 gzip "\$unwrapped_name"
192 rm -rf etc
193 done
194fi
195
196bash /tmp/jujuredir/setup-juju.sh 6079
197echo "export JUJU_REPOSITORY=/charms" >> /home/vagrant/.bashrc
198SCRIPT
199
200system 'mkdir', '-p', 'charms'
201
202Vagrant.configure("2") do |config|
203 # This Vagrantfile is auto-generated by 'vagrant package' to contain
204 # the MAC address of the box. Custom configuration should be placed in
205 # the actual 'Vagrantfile' in this box.
206
207 config.vm.base_mac = "${macaddr}"
208 config.vm.network :forwarded_port, guest: 22, host: 2122, host_ip: "127.0.0.1"
209 config.vm.network :forwarded_port, guest: 80, host: 6080, host_ip: "127.0.0.1"
210 config.vm.network :forwarded_port, guest: 6079, host: 6079, host_ip: "127.0.0.1"
211 config.vm.network "private_network", ip: "172.16.250.15"
212 config.vm.provider "virtualbox" do |vb|
213 vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
214 end
215 config.vm.provision "shell", inline: \$script
216
217 config.vm.synced_folder "charms/", "/charms"
218end
219
220# Load include vagrant file if it exists after the auto-generated
221# so it can override any of the settings
222include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
223load include_vagrantfile if File.exist?(include_vagrantfile)
224EOF
225
226# Now pack it all up....
227tar -C ${WORKSPACE}/box -Scvf ${box_name} box.ovf Vagrantfile box-disk1.vmdk ||
228 fail "Unable to create box file"
229
230# Some minor cleanup
231rm ${disk_working} ${disk_working//.img/.raw} || /bin/true
232rm -rf ${WORKSPACE}/box *.vdi
233[ -e "${raw_f}" ] && rm "${raw_f}"
234
235# Bail here if this is a local build
236[ "${LOCAL_BUILD:-0}" -eq 1 ] && exit 0
237
238# Put the box in place
239mkdir -p "${final_location}" ||
240 fail "Unable to create the vagrant image location"
241
242cp ${box_name} ${final_location} ||
243 fail "Failed to place vagrant image in final home"
244
245# Now Checksum it all
246
247# Override and set some home variables
248export HOME="/srv/builder"
249export CDIMAGE_BIN="${HOME}/cdimage/bin"
250PUBLISH_SCRIPTS=${HOME}/ec2-publishing-scripts
251export CDIMAGE_ROOT="${HOME}/cdimage"
252export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
253checksum-directory ${final_location}
0254
=== added file 'jenkins/CloudImages_Update_Builder.sh'
--- jenkins/CloudImages_Update_Builder.sh 1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Update_Builder.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,68 @@
1#!/bin/bash
2
3# Set default umask
4umask 022
5
6# Read in the common files
7source "${kvm}/functions/common"
8source "${kvm}/functions/retry"
9
10# Apply the build stuff
11find . -iname "*build_properties" | xargs -I FILE cp FILE .
12[ -e "build_properties" ] &&
13 source build_properties ||
14 fail "Failed to read build_properties. I don't know what I'm doing!"
15
16[ -e failed ] && rm failed
17[ -e success ] && rm success
18
19# Copy the target disk image
20case ${SUITE} in
21 trusty|xenial)
22 disk_orig="${SUITE}-server-cloudimg-${ARCH}-disk1.img"
23 builder_img=/srv/builder/images/trusty-builder-latest.img
24 ;;
25 zesty)
26 # Zesty needs yakkety or newer due to ext4 tool changes
27 disk_orig="${SUITE}-server-cloudimg-${ARCH}.img"
28 builder_img=/srv/builder/images/zesty-builder-latest.img
29 ;;
30 *)
31 disk_orig="${SUITE}-server-cloudimg-${ARCH}.img"
32 builder_img=/srv/builder/images/artful-builder-latest.img
33 ;;
34esac
35
36disk_cp="${disk_orig//cloudimg/cloudimg-builder-$(date +%Y%m%d)}"
37disk_root="/srv/ec2-images/${SUITE}/${SERIAL:-current}"
38disk_working="${WORKSPACE}/${disk_cp}"
39raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
40
41cp "${disk_root}/${disk_orig}" "${disk_working}" ||
42 fail "Unable to copy ${disk_orig} from ${disk_root}"
43
44qemu-img resize "${disk_working}" 5G ||
45 fail "unable to resize disk"
46
47# Launch KVM to do the work
48${kvm}/launch_kvm.sh \
49 --id "${ARCH}-${BUILD_ID}" \
50 --user-data "${kvm}/config/cloud-vps.cfg" \
51 --cloud-config "${kvm}/templates/img-update.tmpl" \
52 --extra-disk "${disk_working}" \
53 --disk-gb 5 \
54 --raw-disk "${raw_f}" \
55 --raw-size 1 \
56 --img-url ${builder_img} ||
57 fail "KVM instance failed to build image."
58
59# Remove the results
60rm "${raw_f}" || /bin/true
61
62# Compress it down...
63mv "${disk_working}" "${disk_working}.new"
64qemu-img convert "${disk_working}.new" -c -O qcow2 "${disk_working}" ||
65 fail "Failed to create compressed image"
66
67rm "${disk_working}.new"
68
069
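
The final step of CloudImages_Update_Builder.sh shrinks the modified builder image by re-converting it with compression. The same recipe in isolation, with a placeholder filename:

#!/bin/bash
# Sketch: recompress a qcow2 image via a temporary copy.
set -e
img="trusty-server-cloudimg-amd64-builder.img"   # placeholder path
mv "${img}" "${img}.new"
qemu-img convert -c -O qcow2 "${img}.new" "${img}"   # -c writes compressed clusters
rm "${img}.new"
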
=== added file 'jenkins/CloudImages_Vagrant.sh'
--- jenkins/CloudImages_Vagrant.sh 1970-01-01 00:00:00 +0000
+++ jenkins/CloudImages_Vagrant.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,232 @@
1#!/bin/bash
2
3# Set default umask
4umask 022
5
6# Read in the common files
7source "${kvm}/functions/common"
8source "${kvm}/functions/retry"
9source "${kvm}/functions/locker"
10export HOME=${WORKSPACE}
11
12# Apply the build stuff
13[ -e "build_properties" ] &&
14 source build_properties ||
15 fail "Failed to read build_properties. I don't know what I'm doing!"
16
17rm {failed,success} || /bin/true
18
19# Copy the target disk image
20ARCH_TYPE=${ARCH_TYPE:-$ARCH}
21disk_orig="${SUITE}-server-cloudimg-${ARCH_TYPE}-disk1.img"
22disk_cp="${disk_orig//$ARCH_TYPE/$ARCH_TYPE-vagrant}"
23disk_root="${SRV_D:-/srv/ec2-images}/${SUITE}/${SERIAL:-current}"
24disk_working="${WORKSPACE}/${disk_cp}"
25final_disk="${WORKSPACE}/box-disk1.vdi"
26final_location="${OUTPUT_D:-/srv/ec2-images}/vagrant/${SUITE}/${SERIAL}"
27box_name="${disk_working//.img/.box}"
28raw_f="${WORKSPACE}/raw_f-$(date +%s).img"
29
30[ -e "${final_location}/${box_name}" -a "${REBUILD}" != "true" ] && exit 0
31
32cp "${disk_root}/${disk_orig}" "${disk_working}" ||
33 fail "Unable to copy ${disk_orig} from ${disk_root}"
34
35# Resize it to 4G, but not the full 40G because we want it sparse
36qemu-img resize ${disk_working} 4G
37
38# Launch KVM to do the work
39${kvm}/launch_kvm.sh \
40 --id "${ARCH_TYPE}-${BUILD_ID}" \
41 --user-data "${kvm}/config/cloud-vps.cfg" \
42 --cloud-config "${kvm}/templates/img-vagrant.tmpl" \
43 --extra-disk "${disk_working}" \
44 --disk-gb 1 \
45 --raw-disk "${raw_f}" \
46 --raw-size 1 \
47 --img-url /srv/builder/images/precise-builder-latest.img ||
48 fail "KVM instance failed to build image."
49
50# Convert the qcow2 image to raw, then to VDI.
51qemu-img convert -O raw ${disk_working} ${disk_working//.img/.raw}
52truncate -s 40G ${disk_working//.img/.raw}
53
54_vbox_cmd convertfromraw \
55 --format vdi \
56 ${disk_working//.img/.raw} ${final_disk}
57
58# Create the VM
59vmname="ubuntu-cloudimg-${SUITE}-vagrant-${ARCH_TYPE}"
60_vbox_cmd modifyhd --compact ${final_disk}
61
62dist_v="Ubuntu"
63[ "${ARCH_TYPE}" = "amd64" ] && dist_v="Ubuntu_64"
64_vbox_cmd createvm \
65 --name ${vmname} \
66 --ostype ${dist_v} \
67 --register
68
69_vbox_cmd modifyvm ${vmname} \
70 --memory 512 \
71 --boot1 disk \
72 --boot2 none \
73 --boot3 none \
74 --boot4 none \
75 --vram 12 \
76 --pae off \
77 --acpi on \
78 --ioapic on \
79 --rtcuseutc on
80# --natnet1 default \
81
82if [ "${ARCH_TYPE}" = "i386" ]; then
83 _vbox_cmd modifyvm ${vmname} \
84 --ioapic off \
85 --pae on
86fi
87
88
89_vbox_cmd modifyvm ${vmname} --natpf1 "guestssh,tcp,,2222,,22"
90
91_vbox_cmd storagectl "${vmname}" \
92 --name "SATAController" \
93 --add sata \
94 --controller IntelAhci \
95 --sataportcount 1 \
96 --hostiocache on
97
98_vbox_cmd storageattach ${vmname} \
99 --storagectl "SATAController" \
100 --port 0 \
101 --device 0 \
102 --type hdd \
103 --medium ${final_disk}
104
105# Set extra-data
106_vbox_cmd setextradata ${vmname} installdate ${SERIAL}
107_vbox_cmd setextradata ${vmname} supported false
108
109# Set the Guest information to get rid of error message
110[ -e vagrant_image.pkgs ] && {
111
112 vbox_version=""
113 while read -r line
114 do
115 line=( $(echo ${line}) )
116 [[ ${line[0]} =~ virtualbox-guest-utils ]] && vbox_version=${line[1]}
117 done < vagrant_image.pkgs
118 debug "Guest Additions version is ${vbox_version}"
119
120 # Set the revision to some arbitrary value
121 _vbox_cmd guestproperty set ${vmname} \
122 "/VirtualBox/GuestAdd/Revision" '8000'
123
124 # Set the Ubuntu packaged version correctly
125 _vbox_cmd guestproperty set ${vmname} \
126 "/VirtualBox/GuestAdd/VersionExt" \
127 "${vbox_version//-dfsg-*/_Ubuntu}"
128
129 # Set the version string appropriately
130 _vbox_cmd guestproperty set ${vmname} \
131 "/VirtualBox/GuestAdd/Version" \
132 "${vbox_version//-dfsg-*/}"
133}
134
135mkdir box
136_vbox_cmd export ${vmname} --output box/box.ovf
137
138# Create the Vagrant file
139#macaddr="02:$(openssl rand -hex 5)"
140macaddr=$(awk '-F"' '/<Adapter slot="0" enabled="true"/ {print$6}' ${WORKSPACE}/box/box.ovf)
141cat << EOF > ${WORKSPACE}/box/Vagrantfile
142Vagrant::Config.run do |config|
143 # This Vagrantfile is auto-generated by 'vagrant package' to contain
144 # the MAC address of the box. Custom configuration should be placed in
145 # the actual 'Vagrantfile' in this box.
146 config.vm.base_mac = "${macaddr}"
147end
148
149# Load include vagrant file if it exists after the auto-generated
150# so it can override any of the settings
151include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
152load include_vagrantfile if File.exist?(include_vagrantfile)
153EOF
154
155# Now pack it all up....
156tar -C ${WORKSPACE}/box -Scvf ${box_name} box.ovf Vagrantfile box-disk1.vmdk ||
157 fail "Unable to create box file"
158
159# Some minor cleanup
160rm ${disk_working} ${disk_working//.img/.raw} || /bin/true
161rm -rf ${WORKSPACE}/box *.vdi
162rm "${raw_f}" || /bin/true
163
164# Put the box in place
165mkdir -p "${final_location}" ||
166 fail "Unable to create the vagrant image location"
167
168cp ${box_name} ${final_location} ||
169 fail "Failed to place vagrant image in final home"
170
171# box_d is where the boxes are stored
172box_d="${OUTPUT_D:-/srv/ec2-images}/vagrant/${SUITE}"
173
174# Only proceed if the required boxes exist
175boxes=($(find ${box_d}/${SERIAL} -regextype posix-extended -regex ".*(amd64|i386)-vagrant-disk1.box"))
176if [ "${#boxes[@]}" -ne 2 ]; then
177 echo "Not updating current, required boxes are missing"
178 [[ ! "${boxes[@]}" =~ "amd64" ]] && echo "Missing build for amd64"
179 [[ ! "${boxes[@]}" =~ "i386" ]] && echo "Missing build for i386"
180
181 # We don't want to fail here.
182 exit 0
183else
184 echo "Updating current links; all builds are present"
185fi
186
187# Update the link to current
188current_l="${box_d}/current"
189[ -e "${current_l}" ] && rm "${current_l}"
190( cd "${box_d}" && ln -s "${SERIAL}" current )
191
192# Cleanup old builds
193builds=($(find ${box_d} -mindepth 1 -maxdepth 1 -type d | sort -r))
194build_count="${#builds[@]}"
195
196echo "------------------------"
197echo "Clean-up for prior builds"
198echo "Found ${build_count} builds for consideration"
199
200for b in ${builds[@]}
201do
202 echo " - found build ${b}"
203done
204echo ""
205
206[ "${build_count}" -gt 4 ] && {
207 for item in $(seq 4 ${build_count})
208 do
209 [ -e "${builds[$item]}" ] && {
210 echo "Removing build ${builds[$item]} for deletion"
211 rm -rf ${builds[$item]} ||
212 echo "Failed to remove build ${builds[$item]}"
213 }
214 done
215
216 for item in $(seq 0 3)
217 do
218 [ -e "${builds[$item]}" ] &&
219 echo "Preserving build ${builds[$item]}"
220 done
221
222} || echo "No builds marked for removal"
223
224
225# Override and set some home variables
226export HOME="/srv/builder"
227export CDIMAGE_BIN="${HOME}/cdimage/bin"
228PUBLISH_SCRIPTS=${HOME}/ec2-publishing-scripts
229export CDIMAGE_ROOT="${HOME}/cdimage"
230export PATH="${PUBLISH_SCRIPTS}:${CDIMAGE_BIN}:${PATH}"
231checksum-directory ${final_location}
232
0233
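
The clean-up loop at the end of CloudImages_Vagrant.sh keeps the four newest vagrant serials and removes the rest. The same retention policy, expressed as a standalone sketch (box_d and keep_count are placeholders):

#!/bin/bash
# Sketch: keep only the newest N serial directories under a box directory.
box_d="/srv/ec2-images/vagrant/xenial"   # placeholder
keep_count=4

# Serial directories (YYYYMMDD[.N]) sort lexically, so a reverse sort is
# newest-first; plain -type d skips the "current" symlink.
builds=( $(find "${box_d}" -mindepth 1 -maxdepth 1 -type d | sort -r) )

for old in "${builds[@]:${keep_count}}"; do
    echo "Removing old build ${old}"
    rm -rf "${old}"
done
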
=== added file 'jenkins/MAAS_Builder.sh'
--- jenkins/MAAS_Builder.sh 1970-01-01 00:00:00 +0000
+++ jenkins/MAAS_Builder.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,171 @@
1#!/bin/bash
2set -x
3fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
4
5find . -iname "*build_properties" -exec cp {} . \; ||
6 echo "Unable to copy build properties, this might be v2"
7
8[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
9 source build_properties ||
10 fail "Failed to read build_properties. I don't know what I'm doing!";
11}
12
13# Read in the common functions
14my_dir="$( cd "$( dirname "$0" )" && pwd )"
15base_dir=$(dirname ${my_dir})
16export PATH="${base_dir}:${my_dir}:${PATH}"
17source "${base_dir}/functions/locker"
18source "${base_dir}/functions/common"
19source "${base_dir}/functions/retry"
20source ${my_dir}/build_lib.sh
21select_build_config
22
23export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
24out_f="${WORKSPACE}/maas-${SUITE}-${STREAM}-config.sh"
25raw_f="${WORKSPACE}/${SUITE}-output.raw"
26query_t="${WORKSPACE}/cloud-images-query.tar"
27base_name="${SUITE}-server-cloudimg"
28rel_base_name="ubuntu-$(ubuntu-adj2version ${SUITE})-${stream//-/}-server-cloudimg"
29
30export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
31
32case "${STREAM}" in
33 release) build_f="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}";
34 base_name=${rel_base_name};
35 out_d="/srv/maas-images/ephemeral/releases/${SUITE}/release-${SERIAL}"
36 ;;
37 daily) build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
38 out_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}";
39 ;;
40 alpha*|beta*) build_f="/srv/ec2-images/releases/${SUITE}/${STREAM}";
41 base_name=${rel_base_name};
42 out_d="/srv/maas-images/ephemeral/release/${SUITE}/${STREAM}";
43 ;;
44 *) fail "Unknown stream ${STREAM}.";;
45esac
46
47final_out_d="${out_d}"
48
49[ -e "${final_out_d}" -a "${REBUILD:-false}" = "false" ] &&
50 fail "Build already exists. Rebuild is set to false. Failing this build"
51
52# Tar up query for use in-image
53[ ! -e "${query_t}" ] && {
54 tar cvf ${query_t} \
55 ${QUERY_D:-/srv/ec2-images/query} \
56 ${build_f} \
57 --exclude "*img" --exclude "*azure*" --exclude "*html" \
58 --exclude "*armel*" --exclude "*root.tar.gz" \
59 --exclude "*floppy" ||
60 fail "Failed to pack up build elements for MAAS builder"; }
61
62# Generate the template file
63ci_cfg="${kvm_builder}/config/cloud-maas.cfg"
64template="${kvm_builder}/templates/img-maas.tmpl"
65[ "${IS_MAAS_V2:-0}" -eq 1 ] && {
66 template="${kvm_builder}/templates/img-maasv2.tmpl"
67 ci_cfg="${kvm_builder}/config/cloud-maasv2.cfg"
68}
69
70maas_config.sh \
71 --distro "${SUITE}" \
72 --stream "${STREAM}" \
73 --template "${template}" \
74 --base-name "${base_name}" \
75 --local "${build_f}" \
76 --serial "${SERIAL}" \
77 --out "${out_f}" \
78 --out_d "${out_d}" ||
79 fail "Failed to configure KVM instance for building"
80
81[ -n "${cloud_init_cfg}" ] && ci_cfg="${kvm_builder}/config/${cloud_init_cfg}"
82
83# Launch KVM to do the work
84launch_kvm.sh \
85 --id ${BUILD_ID} \
86 --user-data "${out_f}" \
87 --cloud-config "${ci_cfg}" \
88 --extra-disk "${query_t}" \
89 --disk-gb 50 \
90 --raw-disk "${raw_f}" \
91 --raw-size 20 \
92 --img-url ${BUILDER_CLOUD_IMAGE} ||
93 fail "KVM instance failed to build image."
94
95# Extract the result set
96tar -xvvf "${raw_f}" ||
97 fail "Failed to extract information from instance"
98
99# Useful for off-host builds, like ppc64el. Just make sure that any off-host
100# builds are done before the on-host builds.
101[ "${BUILD_ONLY:-0}" -eq 1 ] && exit 0
102
103# Extracted results should be here
104[ ! -e "${WORKSPACE}/${out_d}" ] && fail "Expected result directory is missing: ${WORKSPACE}/${out_d}"
105
106# Checksum the results (and sign 'em)
107export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
108/srv/builder/vmbuilder/bin/cronrun checksum-directory "${WORKSPACE}/${out_d}" ||
109 fail "Failed to create checksums and GPG signatures"
110
111set -x
112# Put the bits where they go...
113mkdir -p "${final_out_d}" &&
114 cp -a ${WORKSPACE}${out_d}/* "${final_out_d}" &&
115 echo "Copied bits to final location ${final_out_d}" ||
116 fail "Unable to copy build bits to final location"
117
118# Produce build-info
119cat << EOF > "${final_out_d}/build-info.txt"
120serial=${SERIAL}
121orig_prefix=${SUITE}-ephemeral-maas
122suite=${SUITE}
123build_name=ephemeral
124EOF
125
126# Clean up the dailies
127if [ "${STREAM}" = "daily" ]; then
128 base_d="${out_d%/*}"
129 builds=( $(find ${base_d} -maxdepth 1 -mindepth 1 -type d | sort -r) )
130 build_count=${#builds[@]}
131
132    # Delete all but the six most recent builds
133 if [ ${build_count} -gt 6 ]; then
134 for item in $(seq 6 ${build_count})
135 do
136 [ -e "${builds[$item]}" ] && {
137 rm -rf ${builds[$item]};
138 echo "Build ${SUITE} ${builds[$item]##*/} has been deleted";
139 }
140 done
141
142 for item in $(seq 0 5)
143 do
144 echo "Preserving ${SUITE} ${builds[$item]##*/}"
145 done
146 else
147 echo "No builds marked for deletion"
148 fi
149fi
150
151# Generate the Query2 tree
152src_tree="${WORKSPACE}/maas_src"
153bzr branch "${maas_branch}" "${src_tree}"
154${src_tree}/tree2query \
155 --commit-msg "Build ${BUILD_ID}" \
156 --namespace maas \
157 /srv/maas-images
158
159# Update current
160if [ "${STREAM}" = "daily" ]; then
161 cur_d="/srv/maas-images/ephemeral/daily/${SUITE}/current"
162 [ -e "${cur_d}" ] && rm "${cur_d}"
163 ln -s "${final_out_d}" "${cur_d}" ||
164 echo "Failed to update ${cur_d}"
165fi
166
167
168# Remove the results
169rm "${raw_f}" ||
170 fail "Failed to clean up files!"
171
0172
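
MAAS_Builder.sh repoints the daily "current" link by removing and recreating it; the same update can also be done in a single step with ln -sfn. A sketch with placeholder paths:

#!/bin/bash
# Sketch: point the "current" symlink at the newest daily serial.
serial_d="/srv/maas-images/ephemeral/daily/xenial/20180531"   # placeholder
cur_d="/srv/maas-images/ephemeral/daily/xenial/current"
ln -sfn "${serial_d}" "${cur_d}"   # -n replaces the link itself rather than descending into it
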
=== added file 'jenkins/MAAS_Promotion.sh'
--- jenkins/MAAS_Promotion.sh 1970-01-01 00:00:00 +0000
+++ jenkins/MAAS_Promotion.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,31 @@
1#!/bin/bash
2
3fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
4
5if [ "${TAG}" == "release" ]; then
6 TAG="release-${SERIAL}"
7fi
8
9src_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}"
10final_out_d="/srv/maas-images/ephemeral/releases/${SUITE}/${TAG}"
11
12[ -e ${src_d} ] ||
13 fail "Source ${src_d} does not exist"
14
15[ -e ${final_out_d} ] &&
16 fail "Serial has already been promoted"
17
18mkdir -p "${final_out_d}" &&
19 rsync -a ${src_d}/ ${final_out_d} &&
20 echo "Copied bits to final location ${final_out_d}" ||
21 fail "Unable to copy build bits to final location"
22
23# Generate the Query2 tree
24export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
25src_tree="${WORKSPACE}/maas_src"
26bzr branch "${maas_branch}" "${src_tree}"
27${src_tree}/tree2query \
28 --commit-msg "Build ${BUILD_ID}" \
29 --namespace maas \
30 /srv/maas-images
31
032
=== added file 'jenkins/MAASv2_Builder.sh'
--- jenkins/MAASv2_Builder.sh 1970-01-01 00:00:00 +0000
+++ jenkins/MAASv2_Builder.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,191 @@
1#!/bin/bash
2set -x
3fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
4
5find . -iname "*build_properties" -exec cp {} . \; ||
6 echo "Unable to copy build properties, this might be v2"
7
8[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
9 source build_properties ||
10 fail "Failed to read build_properties. I don't know what I'm doing!";
11}
12
13STREAM="${STREAM:-daily}"
14# Read in the common functions
15my_dir="$( cd "$( dirname "$0" )" && pwd )"
16base_dir=$(dirname ${my_dir})
17export PATH="${base_dir}:${my_dir}:${PATH}"
18source "${base_dir}/functions/locker"
19source "${base_dir}/functions/common"
20source "${base_dir}/functions/retry"
21source ${my_dir}/build_lib.sh
22select_build_config
23
24export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
25out_f="${WORKSPACE}/maas-${SUITE}-${STREAM}-config.sh"
26raw_f="${WORKSPACE}/${SUITE}-output.raw"
27query_t="${WORKSPACE}/cloud-images-query.tar"
28base_name="${SUITE}-server-cloudimg"
29rel_base_name="ubuntu-${VERSION:-$(ubuntu-adj2version ${SUITE})}-${stream//-/}-server-cloudimg"
30
31export maas_branch_v1="http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral"
32export maas_branch="${MAAS_BRANCH:-$maas_branch_v1}"
33
34case "${STREAM}" in
35 release) build_f="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}";
36 base_name=${rel_base_name};
37 out_d="/srv/maas-images/ephemeral/releases/${SUITE}/release-${SERIAL}"
38 ;;
39 daily) build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
40 out_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}";
41 ;;
42 alpha*|beta*) build_f="/srv/ec2-images/releases/${SUITE}/${STREAM}";
43 base_name=${rel_base_name};
44 out_d="/srv/maas-images/ephemeral/releases/${SUITE}/${STREAM}";
45 ;;
46 *) fail "Unknown stream ${STREAM}.";;
47esac
48
49final_out_d="${out_d}"
50
51[ -e "${final_out_d}" -a "${REBUILD:-false}" = "false" ] &&
52 fail "Build already exists. Rebuild is set to false. Failing this build"
53
54if [ ! -e "${query_t}" ]; then
55
56 if [ "${MAASv2:-0}" -eq 1 ]; then
57 # MAAS v2 doesn't need this information
58 out_d="/tmp/maas_final"
59 touch ${WORKSPACE}/maasv2
60 tar cvf ${query_t} ${WORKSPACE}/maasv2
61
62 if [ -e "${WORKSPACE}/tmp/maas-final" ]; then
63 tar cvf ${query_t} maas-final ||
64 fail "Failed to create tarball of MAAS images"
65 fi
66
67 else
68        # MAAS v1 needs this information
69 tar cvf ${query_t} \
70 ${QUERY_D:-/srv/ec2-images/query} \
71 ${build_f} \
72 --exclude "*img" --exclude "*azure*" --exclude "*html" \
73 --exclude "*armel*" --exclude "*root.tar.gz" \
74 --exclude "*floppy" ||
75 fail "Failed to pack up build elements for MAAS builder";
76 fi
77fi
78
79# Select the right template
80tmpl="${kvm_builder}/templates/img-maas.tmpl"
81[ "${MAASv2:-0}" -eq 1 ] && tmpl="${tmpl//maas.tmpl/maasv2.tmpl}"
82
83# Construct the right template
84maas_config.sh \
85 --distro "${SUITE}" \
86 --stream "${STREAM}" \
87 --template "${tmpl}" \
88 --base-name "${base_name}" \
89 --local "${build_f}" \
90 --serial "${SERIAL}" \
91 --out "${out_f}" \
92 --maas-branch "${maas_branch}" \
93 --out_d "${out_d}" ||
94 fail "Failed to configure KVM instance for building"
95set +x
96
97ci_cfg="${kvm_builder}/config/cloud-maasv2.cfg"
98[ "$(uname -m)" == "ppc64" ] && ci_cfg="${kvm_builder}/config/cloud-trusty-pp64el.cfg"
99
100# Launch KVM to do the work
101launch_kvm.sh \
102 --id ${BUILD_ID} \
103 --user-data "${out_f}" \
104 --cloud-config "${ci_cfg}" \
105 --extra-disk "${query_t}" \
106 --disk-gb 50 \
107 --raw-disk "${raw_f}" \
108 --raw-size 20 \
109 --mem 1G \
110 --img-url ${BUILDER_CLOUD_IMAGE} ||
111 fail "KVM instance failed to build image."
112
113# Extract the result set
114tar -xvvf "${raw_f}" ||
115 fail "Failed to extract information from instance"
116
117# Useful for off-host builds, like ppc64el. Just make sure that any off-host
118# builds are done before the on-host builds.
119
120[ "${BUILD_ONLY:-0}" -eq 1 ] && exit 0
121[ "${MAASv2:-0}" -eq 1 ] && exit 0
122
123# Extracted results should be here
124[ ! -e "${WORKSPACE}/${out_d}" ] && fail "Expected result directory is missing: ${WORKSPACE}/${out_d}"
125
126# Checksum the results (and sign 'em)
127export CDIMAGE_ROOT="/srv/builder/vmbuilder/cdimage"
128/srv/builder/vmbuilder/bin/cronrun checksum-directory "${WORKSPACE}/${out_d}" ||
129 fail "Failed to create checksums and GPG signatures"
130
131set -x
132# Put the bits where they go...
133mkdir -p "${final_out_d}" &&
134 cp -a ${WORKSPACE}${out_d}/* "${final_out_d}" &&
135 echo "Copied bits to final location ${final_out_d}" ||
136 fail "Unable to copy build bits to final location"
137
138# Produce build-info
139cat << EOF > "${final_out_d}/build-info.txt"
140serial=${SERIAL}
141orig_prefix=${SUITE}-ephemeral-maas
142suite=${SUITE}
143build_name=ephemeral
144EOF
145
146# Clean up the dailies
147if [ "${STREAM}" = "daily" ]; then
148 base_d="${out_d%/*}"
149 builds=( $(find ${base_d} -maxdepth 1 -mindepth 1 -type d | sort -r) )
150 build_count=${#builds[@]}
151
152    # Delete all but the six most recent builds
153 if [ ${build_count} -gt 6 ]; then
154 for item in $(seq 6 ${build_count})
155 do
156 [ -e "${builds[$item]}" ] && {
157 rm -rf ${builds[$item]};
158 echo "Build ${SUITE} ${builds[$item]##*/} has been deleted";
159 }
160 done
161
162 for item in $(seq 0 5)
163 do
164 echo "Preserving ${SUITE} ${builds[$item]##*/}"
165 done
166 else
167 echo "No builds marked for deletion"
168 fi
169fi
170
171# Generate the Query2 tree
172src_tree="${WORKSPACE}/maas_src"
173bzr branch "${maas_branch_v1}" "${src_tree}"
174${src_tree}/tree2query \
175 --commit-msg "Build ${BUILD_ID}" \
176 --namespace maas \
177 /srv/maas-images
178
179# Update current
180if [ "${STREAM}" = "daily" ]; then
181 cur_d="/srv/maas-images/ephemeral/daily/${SUITE}/current"
182 [ -e "${cur_d}" ] && rm "${cur_d}"
183 ln -s "${final_out_d}" "${cur_d}" ||
184 echo "Failed to update ${cur_d}"
185fi
186
187
188# Remove the results
189rm "${raw_f}" ||
190 fail "Failed to clean up files!"
191
0192
=== added file 'jenkins/MAASv2_Cleaner.sh'
--- jenkins/MAASv2_Cleaner.sh 1970-01-01 00:00:00 +0000
+++ jenkins/MAASv2_Cleaner.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,55 @@
1#!/bin/bash
2#
3# Clean up MAAS v2/v3 builds/streams
4#
5my_dir="$(dirname $0)"
6my_p_dir="$(dirname $my_dir)"
7source ${my_p_dir}/functions/common
8source ${my_p_dir}/functions/bzr_check.sh
9
10# Number of builds to publish in the stream
11MAX_BUILDS=${MAX_BUILDS:-3}
12# Number of days to keep files not referenced in the stream data
13REAP_AGE=${REAP_AGE:-2d}
14
15WORKSPACE=${WORKSPACE:-$PWD}
16OUTDIR=${JENKINS_HOME:?}/.config/MAASv2_Cleaner/
17DAILY_ROOT=/srv/maas-images/ephemeral-v2/daily/
18RELEASE_ROOT=/srv/maas-images/ephemeral-v2/releases/
19INDEX_PATH=streams/v1/index.json
20
21# Local checkouts
22sstreams=${WORKSPACE}/sstreams
23maasv2=${WORKSPACE}/maasv2
24check_branch ${BZR_SIMPLESTREAMS:-lp:simplestreams} ${sstreams}
25check_branch ${BZR_MAASv2:-lp:maas-images} ${maasv2}
26
27for METADATA_ROOT in /srv/maas-images/ephemeral-v2/daily/ \
28 /srv/maas-images/ephemeral-v3/daily/; do
29 case $METADATA_ROOT in
30 *v2*)
31 orphan_json="${OUTDIR}/daily.json"
32 ;;
33 *v3*)
34 orphan_json="${OUTDIR}/daily-v3.json"
35 ;;
36 *)
37 echo "Unexpected METADATA_ROOT"
38 exit 1
39 ;;
40 esac
41 run PYTHONPATH=${sstreams}:${maasv2} \
42 ${maasv2}/bin/meph2-util clean-md \
43 ${MAX_BUILDS} ${METADATA_ROOT}/${INDEX_PATH}
44
45 run PYTHONPATH=${sstreams}:${maasv2} \
46 ${maasv2}/bin/meph2-util find-orphans \
47 "${orphan_json}" \
48 ${METADATA_ROOT} ${METADATA_ROOT}/${INDEX_PATH}
49
50 run PYTHONPATH=${sstreams}:${maasv2} \
51 ${maasv2}/bin/meph2-util reap-orphans \
52 --older ${REAP_AGE} \
53 "${orphan_json}" \
54 ${METADATA_ROOT}
55done
056
=== added file 'jenkins/MAASv3_Builder.sh'
--- jenkins/MAASv3_Builder.sh 1970-01-01 00:00:00 +0000
+++ jenkins/MAASv3_Builder.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,67 @@
1#!/bin/bash -x
2
3fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
4
5[ -z "${SERIAL}" -a -z "${SUITE}" -a -e "build_properties" ] && {
6 source build_properties ||
7 fail "Failed to read build_properties.";
8}
9
10# Read in the common functions
11my_dir="$( cd "$( dirname "$0" )" && pwd )"
12base_dir=$(dirname ${my_dir})
13export PATH="${base_dir}:${my_dir}:${PATH}"
14source "${base_dir}/functions/locker"
15source "${base_dir}/functions/common"
16source "${base_dir}/functions/retry"
17source ${my_dir}/build_lib.sh
18select_build_config
19
20case "${STREAM:?}" in
21 daily) build_f="/srv/ec2-images/${SUITE}/${SERIAL}";
22 base_name="${SUITE}-server-cloudimg"
23 ;;
24 *) fail "Unknown/unsupported stream ${STREAM}.";;
25esac
26
27export WORKSPACE="${WORKSPACE:-$WORKSPACE_R}"
28out_f="${WORKSPACE:?}/maas-${SUITE}-${STREAM}-config.sh"
29raw_f="${WORKSPACE}/${SUITE}-output.raw"
30
31export maas_branch="${MAAS_BRANCH:?}"
32
33touch ${WORKSPACE}/maasv3
34tar cvf ${query_t} ${WORKSPACE}/maasv3
35
36# Construct the right template
37maas_config.sh \
38 --distro "${SUITE}" \
39 --stream "${STREAM}" \
40 --template "${kvm_builder}/templates/img-maasv3.tmpl" \
41 --base-name "${base_name}" \
42 --local "${build_f}" \
43 --serial "${SERIAL}" \
44 --out "${out_f}" \
45 --maas-branch "${maas_branch}" \
46 --out_d "/tmp/maas_final" ||
47 fail "Failed to configure KVM instance for building"
48
49ci_cfg="${kvm_builder}/config/cloud-maasv3.cfg"
50[ "$(uname -m)" == "ppc64" ] && ci_cfg="${kvm_builder}/config/cloud-trusty-pp64el.cfg"
51
52# Launch KVM to do the work
53launch_kvm.sh \
54 --id ${BUILD_ID} \
55 --user-data "${out_f}" \
56 --cloud-config "${ci_cfg}" \
57 --extra-disk "${query_t}" \
58 --disk-gb 50 \
59 --raw-disk "${raw_f}" \
60 --raw-size 20 \
61 --mem 1G \
62 --img-url ${BUILDER_CLOUD_IMAGE} ||
63 fail "KVM instance failed to build image."
64
65# Extract the result set
66tar -xvvf "${raw_f}" ||
67 fail "Failed to extract information from instance"
068
=== added file 'jenkins/Promote_Daily.sh'
--- jenkins/Promote_Daily.sh 1970-01-01 00:00:00 +0000
+++ jenkins/Promote_Daily.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,55 @@
1#!/bin/bash
2echo "---------------------------------------------------"
3echo "Instructed to Promote Daily job:
4echo " Suite: ${SUITE}"
5echo " Serial: ${SERIAL}"
6echo " Milestone: ${MILESTONE_LABEL}"
7echo " Stream: ${BTYPE}"
8echo " Public: ${MAKE_PUBLIC}"
9echo " PrePublish: ${PREPUBLISH}"
10echo ""
11echo "---------------------------------------------------"
12
13cat << EOF > "${WORKSPACE}/build_properties"
14SUITE=${SUITE}
15SERIAL=${SERIAL}
16MILESTONE=${MILESTONE_LABEL}
17STREAM=${BTYPE}
18PUBLIC=${MAKE_PUBLIC}
19PREPUBLISH=${PREPUBLISH}
20EOF
21
22export HOME="/srv/builder/vmbuilder"
23
24cmd=(
25 '/srv/builder/vmbuilder/bin/cronrun'
26 'promote-daily'
27 '--verbose'
28 '--allow-existing' )
29
30if [ "${PREPUBLISH}" == "true" ]; then
31 echo "Pre-publishing rules, will not make public"
32else
33 [ "${MAKE_PUBLIC}" == "true" ] && cmd+=('--make-public')
34fi
35
36case ${BTYPE} in
37 *server*hwe*) pub_path="/srv/ec2-images/server/${SUITE}/${SERIAL}/${BTYPE//server-/}";;
38 *) pub_path="/srv/ec2-images/${BTYPE}/${SUITE}/${SERIAL}"
39 ;;
40esac
41
42if [ "${REPUBLISH}" == "true" ]; then
43 cmd+=('--republish')
44 if [ "${MILESTONE_LABEL}" == "release" ]; then
45 pub_path="/srv/ec2-images/releases/${SUITE}/release-${SERIAL}"
46 else
47 pub_path="/srv/ec2-images/releases/${SUITE}/${MILESTONE_LABEL}"
48 fi
49 [[ "${BTYPE}" =~ server-hwe ]] && pub_path="${pub_path}/${BTYPE//server-/}"
50fi
51
52cmd+=("${MILESTONE_LABEL}" ${pub_path})
53
54echo "Executing command: ${cmd[@]}"
55exec ${cmd[@]}
056
=== added file 'jenkins/Promote_MAAS_Daily.sh'
--- jenkins/Promote_MAAS_Daily.sh 1970-01-01 00:00:00 +0000
+++ jenkins/Promote_MAAS_Daily.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,48 @@
1#!/bin/bash
2
3export maas_branch="${MAAS_BRANCH:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
4
5fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
6
7[ -z "${SERIAL}" ] && fail "Serial must be defined"
8[ -z "${SUITE}" ] && fail "Suite must be defined"
9[ -z "${MILESTONE}" ] && fail "Milestone must be defined"
10
11cp_d="/srv/maas-images/ephemeral/daily/${SUITE}/${SERIAL}"
12finald="/srv/maas-images/ephemeral/releases/${SUITE}"
13
14case "${MILESTONE}" in
15    release) final_d="${finald}/release-${SERIAL}";;
16    alpha|beta) final_d="${finald}/${MILESTONE}";;
17esac
18
19# Sanity check
20[ ! -e "${cp_d}" ] && fail "Serial ${SERIAL} for ${SUITE} does not exist"
21[ -e "${final_d}" ] && fail "Already released ${SERIAL} for ${SUITE} as ${MILESTONE}"
22
23# Make the home directory
24mkdir -p "${final_d}" ||
25 fail "Unable to create final destination"
26
27# Put the files in final destination
28cp -au ${cp_d}/* "${final_d}" ||
29 fail "Failed to copy source files for promotion"
30
31# Generate the Query2 tree
32src_tree="${WORKSPACE}/maas_src"
33bzr branch "${maas_branch}" "${src_tree}"
34${src_tree}/tree2query \
35 --commit-msg "Build ${BUILD_ID}" \
36 --namespace maas \
37 /srv/maas-images
38
39# Update the "release" link
40if [ "${MILESTONE}" = "release" ]; then
41 cur_d="/srv/maas-images/ephemeral/releases/${SUITE}/release"
42 [ -e "${cur_d}" ] && rm "${cur_d}"
43 ln -s "${final_d}" "${cur_d}" ||
44 echo "Failed to update ${cur_d}"
45fi
46
47# Sync the stuff
48KEY=maas /srv/builder/vmbuilder/bin/trigger-sync
049
=== added file 'jenkins/Publish_EC2.sh'
--- jenkins/Publish_EC2.sh 1970-01-01 00:00:00 +0000
+++ jenkins/Publish_EC2.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,64 @@
1#!/bin/bash -x
2
3# Add in the retry stub
4source "${kvm}/functions/retry"
5source "${kvm}/functions/common"
6
7# Exit if trigger job does not want this published
8[ "${PUBLISH_IMAGE}" -eq 0 ] && exit 0
9
10# Set the build directories
11WORK_D="/srv/ec2-images/${BUILD_TYPE}/${SUITE}/${SERIAL}"
12[ "${TEST_BUILD}" -eq 1 ] && WORK_D="/srv/ec2-images/test_builds/${BUILD_TYPE}/${SUITE}/${SERIAL}"
13[ "${SANDBOX_BUILD}" -eq 1 ] && WORK_D="/srv/ec2-images/sandbox/${BUILD_TYPE}/${SUITE}/${SERIAL}"
14
15# Handle the special case of HWE builds. Otherwise they get named like regular
16# server builds and things don't go well.
17[[ "${HWE_SUFFIX}" =~ hwe ]] &&
18 WORK_D="${WORK_D}/${HWE_SUFFIX}" &&
19 BUILD_TYPE="${BUILD_TYPE}-${HWE_SUFFIX}"
20
21
22echo "Using ${WORK_D} as the directory"
23[ -e "${WORK_D}" ] || { echo "Working directory does not exist!"; exit 1; }
24
25ec2_pub="${PWD}/ec2-publishing-scripts"
26
27# Check out the scripts needed
28[ -e "${ec2_pub}" ] && rm -rf "${ec2_pub}"
29bzr branch "${EC2_PUB_SCRIPTS}" "${ec2_pub}"
30
31# Add some elements to the path
32VMBUILDER_PATH="${VMBUILDER_PATH:-/srv/builder/vmbuilder}"
33VMBUILDER_BIN="${VMBUILDER_PATH}/bin"
34XC2_PATH="${VMBUILDER_PATH}/ec2-daily/xc2"
35export PUBLISH_SCRIPTS="${PUBLISH_SCRIPTS:-$VMBUILDER_PATH/ec2-publishing-scripts}"
36export PATH="${VMBUILDER_BIN}:${VMBUILDER_PATH}:${XC2_PATH}:${PATH}"
37export HOME="/srv/builder/vmbuilder"
38export CDIMAGE_ROOT="${CDIMAGE_ROOT:-/srv/builder/cdimage}"
39export EC2_PUB_LOC="${ec2_pub}"
40
41ec2publish() {
42 # Run the publisher job
43 ${kvm}/ec2_publisher.sh \
44 ${SUITE} \
45 ${SERIAL} \
46 ${BUILD_TYPE} \
47 ${WORK_D} \
48 ${TEST_BUILD} \
49 ${SANDBOX_BUILD} \
50 ${ALLOW_EXISTING}
51}
52
53# Retry the publishing up to 6 times
54retry 6 120 ec2publish ||
55    fail "Failed all attempts to publish EC2 images!"
56
57# Add the new daily to the tracker
58#exec_tracker=${ADD_TO_TRACKER:-0}
59#[ "${exec_tracker}" -eq 1 ] && {
60# ${kvm}/tracker.sh daily ${SUITE} ${SERIAL} &&
61# exit $? || fail "Unable to execute tracker!"
62# }
63#
64#exit 0
065
=== added file 'jenkins/Publish_Results_to_Tracker.sh'
--- jenkins/Publish_Results_to_Tracker.sh 1970-01-01 00:00:00 +0000
+++ jenkins/Publish_Results_to_Tracker.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,34 @@
1#!/bin/bash
2
3# Environment variables:
4# HOST: the Jenkins host URL to poll from
5# SUITE: Ubuntu codename
6# MILESTONE: the milestone label, e.g. Alpha 2
7# SERIAL: the build serial, e.g. 20130213
8# OUT: File to execute
9
10set -x
11
12# Setup the QA tracker code
13bzr branch http://bazaar.launchpad.net/~jibel/+junk/qatracker
14cd qatracker
15sed -i "s/iso.qa.ubuntu.com/cloud.qa.ubuntu.com/g" tracker_update_result
16
17bzr branch http://bazaar.launchpad.net/~ubuntu-qa-website-devel/ubuntu-qa-website/python-qatracker
18ln -s python-qatracker/qatracker.py .
19export PATH="${PATH}:${WORKSPACE}/qatracker"
20
21# Generate the actual working script
22${scripts}/tests/tracker.py \
23 --host ${HOST} \
24 --suite ${SUITE} \
25 --test ${TEST} \
26 --milestone "${MILESTONE}" \
27 --serial ${SERIAL} \
28 --out "${WORKSPACE}/script.sh"
29
30# Execute the script
31env API_USER="${API_USER}" \
32 API_KEY="${API_KEY}" \
33    bash ${WORKSPACE}/script.sh 2>&1 | tee publish.log
34[ "${PIPESTATUS[0]}" -eq 0 ] || exit 1
035
=== added file 'jenkins/README.txt'
--- jenkins/README.txt 1970-01-01 00:00:00 +0000
+++ jenkins/README.txt 2018-05-31 04:33:07 +0000
@@ -0,0 +1,1 @@
1This directory contains the jobs that Jenkins executes. Most of the jobs just set up the environment and then call another script, usually one directory below.
02
=== added file 'jenkins/Test_Azure.sh'
--- jenkins/Test_Azure.sh 1970-01-01 00:00:00 +0000
+++ jenkins/Test_Azure.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,17 @@
1#!/bin/bash
2fail() { [ $# -eq 0 ] || echo "$@"; exit 1; }
3
4umask 022
5set -x
6source watch_properties || fail "Failed to read watch properties"
7
8echo "-------------------"
9echo "Image for testing:"
10cat watch_properties
11echo "-------------------"
12
13
14my_dir="$( cd "$( dirname "$0" )" && pwd )"
15base_dir=$(dirname ${my_dir})
16
17${my_dir}/tests/azure.sh ${1}
018
=== added file 'jenkins/build_lib.sh'
--- jenkins/build_lib.sh 1970-01-01 00:00:00 +0000
+++ jenkins/build_lib.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,33 @@
1#!/bin/bash
2
3# set default umask
4umask 022
5
6# Read in the common functions
7my_dir="$( cd "$( dirname "$0" )" && pwd )"
8base_dir=$(dirname ${my_dir})
9export PATH="${base_dir}:${my_dir}:${PATH}"
10source "${base_dir}/functions/locker"
11source "${base_dir}/functions/common"
12source "${base_dir}/functions/retry"
13
14dist_ge() { [[ "$1" > "$2" || "$1" == "$2" ]]; }
15
16[ -z "${DISTRO}" -a -n "${SUITE}" ] && DISTRO="${SUITE}"
17
18select_build_config() {
19
20 [ -z "${BUILDER_CLOUD_IMAGE}" ] && {
21 # Use the latest 14.04 LTS image to do the build.
22 BUILDER_CLOUD_IMAGE="http://cloud-images.ubuntu.com/releases/trusty/release/ubuntu-14.04-server-cloudimg-amd64-uefi1.img"
23 export cloud_init_cfg="cloud-trusty.cfg"
24 }
25
26 # For ppc64el, we use ppc64el images
27 [ "${ARCH_TYPE}" == "ppc64el" ] && {
28 export cloud_init_cfg="cloud-trusty-pp64el.cfg"
29 BUILDER_CLOUD_IMAGE="${BUILDER_CLOUD_IMAGE//amd64/ppc64el}"
30 export BUILDER_CLOUD_IMAGE="${BUILDER_CLOUD_IMAGE//uefi1/disk1}"
31 }
32 echo "Using ${BUILDER_CLOUD_IMAGE} to do the build"
33}
034
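A hedged sketch of how a job script might consume this library (the source path is relative to the branch root; the resulting URL is derived from the substitutions in select_build_config above):

    # Illustrative usage from within a job script.
    source jenkins/build_lib.sh
    export ARCH_TYPE=ppc64el
    select_build_config
    echo "${BUILDER_CLOUD_IMAGE}"
    # -> http://cloud-images.ubuntu.com/releases/trusty/release/ubuntu-14.04-server-cloudimg-ppc64el-disk1.img
    echo "${cloud_init_cfg}"
    # -> cloud-trusty-pp64el.cfg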
=== added file 'jenkins/env-test.sh'
--- jenkins/env-test.sh 1970-01-01 00:00:00 +0000
+++ jenkins/env-test.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,2 @@
1#!/bin/bash
2env
03
=== added file 'launch_kvm.sh'
--- launch_kvm.sh 1970-01-01 00:00:00 +0000
+++ launch_kvm.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,222 @@
1#!/bin/bash
2usage() {
3cat << EOF
4This program is a KVM wrapper for performing tasks inside a KVM Environment.
5Its primary goal is to help developers do dangerous tasks that their IS/IT
6department won't allow them to do on an existing machine.
7 --id <ARG> The ID you want to use to identify the KVM image;
8 this is used to name the image
9 --disk-gb <ARG> Disk size, in GB, to add to the image
10 Defaults to adding 15GB
11 --smp <ARG> KVM SMP options, defaults to:
12 ${smp_opt}
13 --mem <ARG> How much RAM do you want to use
14 --user-data <ARG> Cloud-Init user-data file
15 --cloud-config <ARG> Cloud-Init cloud-config file
16 --img-url <ARG> Location of the image file.
17 --raw-disk <ARG> Name of RAW disk to create and attach.
18 --raw-size <ARG> Size of RAW disk in GB.
19 --extra-disk <ARG> Add an extra disk, starting with /dev/vdd
20 --cloud-init-file <ARG> Additional file for the cloud-init data
21EOF
22exit 1
23}
24
25short_opts="h"
26long_opts="id:,ssh_port,disk-gb:,mem:,bzr-automated-ec2-builds:,cloud-config:,user-data:,kernel-url:,img-url:,raw-disk:,raw-size:,smp:,extra-disk:,cloud-init-file:,help"
27getopt_out=$(getopt --name "${0##*/}" \
28 --options "${short_opts}" --long "${long_opts}" -- "$@") &&
29 eval set -- "${getopt_out}" ||
30 usage
31
32builder_id=$(uuidgen)
33uuid=${builder_id}
34bname="server"
35size_gb=15
36mem=512
37smp_opt="4"
38ud=""
39cloud_config=""
40img_loc="${BUILDER_CLOUD_IMAGE:-http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64-disk1.img}"
41KVM_PID=""
42
43while [ $# -ne 0 ]; do
44 cur=${1}; next=${2};
45 case "$cur" in
46 --id) builder_id="$2"; uuid="${builder_id}"; shift;;
47 --disk-gb) size_gb="$2"; shift;;
48 --mem) mem="$2"; shift;;
49 --cloud-config) ccloud="$2"; shift;;
50 --user-data) ud="$2"; shift;;
51 --img-url) img_loc="$2"; shift;;
52 --raw-disk) raw_disk="$2"; shift;;
53 --raw-size) raw_size="$2"; shift;;
54 --smp) smp_opt="$2"; shift;;
55 --extra-disk) [ -z "${extra_disk}" ] && extra_disk=$2 || extra_disk="${extra_disk} $2"; shift;;
56 --cloud-init-file) [ -z "${cloud_init_files}" ] && cloud_init_files=$2 || cloud_init_files="${cloud_init_files} $2"; shift;;
57 -h|--help) usage; exit 0;;
58 --) shift; break;;
59 esac
60 shift;
61done
62
63work_d="$(mktemp -d /tmp/kvm-builder.XXXX)"
64kvm_pidfile="$(mktemp --tmpdir=${work_d})"
65
66error() { echo "$@" 1>&2; }
67cleanup() {
68 [ -n "${KVM_PID}" ] && kill -9 ${KVM_PID};
69 [ -n "${TAIL_PID}" ] && kill -9 ${TAIL_PID};
70 rm -rf "${work_d}";
71}
72fail() { error "$@"; cleanup; exit 1; }
73debug() { error "$(date -R):" "$@"; }
74sysfail() { fail "Failure in commands detected; purging work directory"; }
75
76# Make sure that we kill everything
77trap sysfail SIGINT SIGTERM
78
79[ -z "${ud}" ] && fail "Must define user-data script via --user-data"
80[ -z "${ccloud}" ] && fail "Must define cloud-config script via --cloud-config"
81
82debug "Creating Cloud-Init configuration..."
83write_mime_args=(
84 -o "${work_d}/user-data.txt"
85 "${ccloud}"
86 "${ud}")
87write_mime_args+=(${cloud_init_files[@]})
88write_mime_location="$(which write-mime-multipart)"
89if which python3 > /dev/null; then
90 "${write_mime_location}" ${write_mime_args[@]} || fail "Unable to create user-data"
91else
92 python "${write_mime_location}" ${write_mime_args[@]} || fail "Unable to create user-data"
93fi
94
95echo "instance-id: $(uuidgen)" > "${work_d}/meta-data"
96echo "local-hostname: builder" >> "${work_d}/meta-data"
97
98debug "Creating Seed for Cloud-Init..."
99"${0%/*}/make-seed.sh" "${work_d}/seed.img" "${work_d}/user-data.txt" "${work_d}/meta-data" ||
100    fail "Failed to create configuration ISO"
101
102# Place the image in place
103debug "Build image location is ${img_loc}"
104if [[ "${img_loc}" =~ "http" ]]; then
105 debug "Fetching cloud image from ${img_loc}"
106 curl -s -o "${work_d}/img-${builder_id}" "${img_loc}" ||
107 fail "Unable to fetch pristine image from '${img_loc}'"
108else
109 cp "${img_loc}" "${work_d}/img-${builder_id}" ||
110 fail "Unable to copy '${img_loc}'"
111fi
112
113debug "Adding ${size_gb}G to image size"
114qemu-img resize "${work_d}/img-${builder_id}" +"${size_gb}G" ||
115 fail "Unable to resize image to ${size_gb}G"
116
117if [ -n "${raw_disk}" -a ! -e "${raw_disk}" ]; then
118 if [ -n "${raw_size}" ]; then
119 dd if=/dev/zero of=${raw_disk} bs=1k count=1 seek=$((${raw_size} * 1024000)) &&
120 debug "Create new raw disk" ||
121 fail "Unable to create raw disk"
122 else
123 fail "Undefined raw disk size"
124 fi
125else
126 debug "Using existing raw disk."
127fi
128
129
130debug "________________________________________________"
131debug "Launching instance..."
132kvm_cmd=(
133 ${QEMU_COMMAND:-kvm}
134 -name ${uuid}
135 -drive file=${work_d}/img-${builder_id},if=virtio,bus=0,cache=unsafe,unit=0
136 -drive file=${raw_disk},if=virtio,format=raw,bus=0,unit=1
137 -drive file=${work_d}/seed.img,if=virtio,media=cdrom,bus=0,cache=unsafe,unit=2
138 -net nic,model=virtio
139 -net user
140 -no-reboot
141 -display none
142 -daemonize
143 -serial file:${work_d}/console.log
144 -pidfile ${kvm_pidfile}
145 )
146kvm_cmd+=(${QEMU_ARGS[@]})
147
148# Arch-dependent settings
149if [[ "$(uname -p)" =~ "ppc64" ]]; then
150 # Use more memory for building on PPC64
151 kvm_cmd+=(-m 4G)
152else
153 kvm_cmd+=(-smp ${smp_opt} -m ${mem})
154fi
155
156# Allow for kernel and append
157if [ -n "${QEMU_KERNEL}" ]; then
158 root="/dev/vda1"
159 if [[ "$(uname -p)" =~ "ppc64" ]]; then
160 root="/dev/vda"
161 fi
162 kvm_cmd+=(-kernel ${QEMU_KERNEL}
163 -append "earlyprintk root=${root} console=hvc0"
164 )
165fi
166
167unit_c=3
168for disk in ${extra_disk}
169do
170 if [[ $(file ${disk}) =~ (disk|qcow|QCOW|vmdk|VMDK|vdi|VDI) ]]; then
171 debug "Adding extra disk $disk to KVM configuration"
172 kvm_cmd+=(-drive file=${disk},if=virtio,bus=1,unit=${unit_c})
173 else
174 debug "Adding extra disk $disk as a raw formatted disk"
175 kvm_cmd+=(-drive file=${disk},if=virtio,format=raw,bus=1,unit=${unit_c})
176 fi
177 unit_c=$((unit_c+1))
178done
179
180debug "KVM command is: ${kvm_cmd[@]}"
181"${kvm_cmd[@]}" ||
182    fail "Failed to launch KVM image"
183
184read KVM_PID < ${kvm_pidfile}
185debug "KVM PID is: ${KVM_PID}"
186
187tail -f "${work_d}/console.log" &
188TAIL_PID=$!
189
190# Wait on the pid until the max timeout
191count=0
192max_count=${MAX_CYCLES:-720}
193while ps ${KVM_PID} > /dev/null 2>&1
194do
195 sleep 10
196 count=$((count + 1))
197 if [ "${count}" -gt "${max_count}" ]; then
198 kill -15 ${KVM_PID}
199 debug "Build timed out...killing PID ${KVM_PID}"
200 fi
201done
202
203debug "________________________________________________"
204debug "KVM PID has ended. Work is done"
205kill -15 ${TAIL_PID}
206
207unset KVM_PID
208unset TAIL_PID
209
210[ -n "${raw_disk}" ] &&
211 debug "Extracting raw tarball" &&
212 { tar xvvf "${raw_disk}" || /bin/true; }
213
214[ ! -e success ] &&
215 fail "Tarball contents reported failure"
216
217cp "${work_d}/console.log" .
218
219# Wait for Cloud-Init to finish any work
220debug "Cleaning up..."
221cleanup
222exit 0
0223
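A hedged example invocation (all file names and sizes are placeholders; config/cloud-trusty.cfg is the cloud-config shipped in this branch):

    # Illustrative only: run a build inside a throwaway trusty guest.
    ./launch_kvm.sh \
        --id "build-$(date +%Y%m%d)" \
        --user-data my-build-script.txt \
        --cloud-config config/cloud-trusty.cfg \
        --img-url http://cloud-images.ubuntu.com/releases/trusty/release/ubuntu-14.04-server-cloudimg-amd64-disk1.img \
        --disk-gb 20 --mem 2048 --smp 2 \
        --raw-disk result.tar --raw-size 10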
=== added file 'maas_config.sh'
--- maas_config.sh 1970-01-01 00:00:00 +0000
+++ maas_config.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,75 @@
1#!/bin/bash
2short_opts="h"
3long_opts="distro:,stream:,maas-branch:,out:,template:,serial:,local:,base-name:,out_d:"
4getopt_out=$(getopt --name "${0##*/}" \
5 --options "${short_opts}" --long "${long_opts}" -- "$@") &&
6 eval set -- "${getopt_out}" || { echo "BAD INVOCATION!"; usage; exit 1; }
7
8usage() {
9 cat <<EOM
10${0##*/} - Populate values in a build template.
11
12 Required:
13 --distro Distro code name, i.e. precise
14 --template Template file
15 --stream Stream, i.e. daily, release
16 --base-name The name of the file to work on
17 --serial The build serial
18 --out The output file
19 --out_d Directory for the output files
20
21 Optional:
22 --maas-branch bzr branch for maas image code
23EOM
24}
25
26
27fail() { echo "${@}" 1>&2; exit 1;}
28
29serial="${serial:-$(date +%Y%m%d)}"
30maas_branch="${maas_branch:-http://bazaar.launchpad.net/~smoser/maas/maas.ubuntu.com.images-ephemeral}"
31template_f="${PWD}/img-maas.tmpl"
32
33while [ $# -ne 0 ]; do
34 cur=${1}; next=${2};
35 case "$cur" in
36 --distro) distro=$2; shift;;
37 --stream) stream=$2; shift;;
38 --local) local_d=$2; shift;;
39 --maas-branch) maas_branch=$2; shift;;
40 --base-name) base_name=$2; shift;;
41 --template) template_f=$2; shift;;
42 --out) out_f=$2; shift;;
43 --out_d) out_d=$2; shift;;
44 --) shift; break;;
45 esac
46 shift;
47done
48
49fail_usage() { fail "Must define $@"; }
50
51[ -z "${distro}" ] && fail_usage "--distro"
52[ -z "${stream}" ] && fail_usage "--stream"
53[ -z "${local_d}" ] && fail_usage "--local"
54[ -z "${out_f}" ] && fail_usage "--out"
55[ -z "${out_d}" ] && fail_usage "--out_d"
56[ -z "${base_name}" ] && fail_usage "--base-name"
57
58case "$distro" in
59 trusty) arches="${ARCH_TYPE:-i386 amd64 armhf}";
60 [[ "$(uname -m)" =~ ppc64 ]] && arches="ppc64el";;
61 *) arches="${ARCH_TYPE:-i386 amd64 armhf}";;
62esac
63
64sed -e "s,%d,${distro},g" \
65 -e "s,%S,${stream},g" \
66 -e "s,%M,${maas_branch},g" \
67 -e "s,%D,${local_d},g" \
68 -e "s,%B,${base_name},g" \
69 -e "s,%s,${serial},g" \
70 -e "s,%O,${out_d},g" \
71 -e "s,%A,${arches},g" \
72 ${template_f} > ${out_f} ||
73 fail "Unable to write template file"
74
75exit 0
076
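The script is a thin sed wrapper that expands the %d/%S/%M/%D/%B/%s/%O/%A tokens in a template. A hedged, self-contained example with a toy template and placeholder paths:

    # Illustrative run against a throwaway template.
    printf 'distro=%%d stream=%%S serial=%%s arches="%%A"\n' > demo.tmpl
    ./maas_config.sh --distro trusty --stream daily \
        --local /srv/builds --base-name trusty-server-root \
        --template demo.tmpl --out demo.conf --out_d /srv/out
    cat demo.conf
    # distro=trusty stream=daily serial=<YYYYMMDD> arches="i386 amd64 armhf"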
=== added file 'make-seed.sh'
--- make-seed.sh 1970-01-01 00:00:00 +0000
+++ make-seed.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,147 @@
1#!/bin/bash
2
3VERBOSITY=0
4TEMP_D=""
5DEF_DISK_FORMAT="raw"
6DEF_FILESYSTEM="iso9660"
7
8error() { echo "$@" 1>&2; }
9errorp() { printf "$@" 1>&2; }
10fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
11failp() { [ $# -eq 0 ] || errorp "$@"; exit 1; }
12
13Usage() {
14 cat <<EOF
15Usage: ${0##*/} [ options ] output user-data [meta-data]
16
17 Create a disk for cloud-init to utilize nocloud
18
19 options:
20 -h | --help show usage
21 -d | --disk-format D disk format to output. default: raw
22 -f | --filesystem F filesystem format (vfat or iso), default: iso9660
23
24 -i | --interfaces F write network interfaces file into metadata
25 -m | --dsmode M add 'dsmode' ('local' or 'net') to the metadata
26 default in cloud-init is 'net', meaning network is
27 required.
28
29 Example:
30 * cat my-user-data
31 #cloud-config
32 password: passw0rd
33 chpasswd: { expire: False }
34 ssh_pwauth: True
35 * echo "instance-id: \$(uuidgen || echo i-abcdefg)" > my-meta-data
36 * ${0##*/} my-seed.img my-user-data my-meta-data
37EOF
38}
39
40bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; exit 1; }
41cleanup() {
42 [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
43}
44
45debug() {
46 local level=${1}; shift;
47 [ "${level}" -gt "${VERBOSITY}" ] && return
48 error "${@}"
49}
50
51short_opts="hi:d:f:m:o:v"
52long_opts="disk-format:,dsmode:,filesystem:,help,interfaces:,output:,verbose"
53getopt_out=$(getopt --name "${0##*/}" \
54 --options "${short_opts}" --long "${long_opts}" -- "$@") &&
55 eval set -- "${getopt_out}" ||
56 bad_Usage
57
58## <<insert default variables here>>
59output=""
60userdata=""
61metadata=""
62filesystem=$DEF_FILESYSTEM
63diskformat=$DEF_DISK_FORMAT
64interfaces=_unset
65dsmode=""
66
67
68while [ $# -ne 0 ]; do
69 cur=${1}; next=${2};
70 case "$cur" in
71 -h|--help) Usage ; exit 0;;
72 -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
73 -d|--disk-format) diskformat=$next; shift;;
74 -f|--filesystem) filesystem=$next; shift;;
75 -m|--dsmode) dsmode=$next; shift;;
76 -i|--interfaces) interfaces=$next; shift;;
77 --) shift; break;;
78 esac
79 shift;
80done
81
82## check arguments here
83## how many args do you expect?
84[ $# -ge 2 ] || bad_Usage "must provide output, userdata"
85[ $# -le 3 ] || bad_Usage "confused by additional args"
86
87output=$1
88userdata=$2
89metadata=$3
90
91[ -n "$metadata" -a "${interfaces}" != "_unset" ] &&
92 fail "metadata and --interfaces are incompatible"
93[ -n "$metadata" -a -n "$dsmode" ] &&
94 fail "metadata and dsmode are incompatible"
95[ "$interfaces" = "_unset" -o -r "$interfaces" ] ||
96 fail "$interfaces: not a readable file"
97
98TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
99 fail "failed to make tempdir"
100trap cleanup EXIT
101
102if [ -n "$metadata" ]; then
103 cp "$metadata" "$TEMP_D/meta-data" || fail "$metadata: failed to copy"
104else
105 {
106 echo "instance-id: iid-local01"
107 [ -n "$dsmode" ] && echo "dsmode: $dsmode"
108 [ "$interfaces" != "_unset" ] && echo "interfaces: |" &&
109 sed 's,^, ,' "$interfaces"
110 } > "$TEMP_D/meta-data"
111fi
112
113if [ "$userdata" = "-" ]; then
114 cat > "$TEMP_D/user-data" || fail "failed to read from stdin"
115else
116 cp "$userdata" "$TEMP_D/user-data" || fail "$userdata: failed to copy"
117fi
118
119## alternatively, create a vfat filesystem with same files
120img="$TEMP_D/seed.img"
121truncate --size 100K "$img" || fail "failed truncate image"
122
123case "$filesystem" in
124 iso9660|iso)
125 genisoimage -output "$img" -volid cidata \
126 -joliet -rock "$TEMP_D/user-data" "$TEMP_D/meta-data" \
127 > "$TEMP_D/err" 2>&1 ||
128 { cat "$TEMP_D/err" 1>&2; fail "failed to genisoimage"; }
129 ;;
130 vfat)
131 mkfs.vfat -n cidata "$img" || fail "failed mkfs.vfat"
132 mcopy -oi "$img" "$TEMP_D/user-data" "$TEMP_D/meta-data" :: ||
133 fail "failed to copy user-data, meta-data to img"
134 ;;
135 *) fail "unknown filesystem $filesystem";;
136esac
137
138[ "$output" = "-" ] && output="$TEMP_D/final"
139qemu-img convert -f raw -O "$diskformat" "$img" "$output" ||
140 fail "failed to convert to disk format $diskformat"
141
142[ "$output" != "$TEMP_D/final" ] || { cat "$output" && output="-"; } ||
143 fail "failed to write to -"
144
145error "wrote ${output} with filesystem=$filesystem and diskformat=$diskformat"
146# vi: ts=4 noexpandtab
147
0148
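The generated seed is a NoCloud datasource. A hedged example of attaching it to a guest (disk name and memory size are placeholders, mirroring how launch_kvm.sh wires the seed in):

    # Illustrative: boot a cloud image against the freshly built seed.
    ./make-seed.sh seed.img my-user-data my-meta-data
    kvm -m 512 \
        -drive file=disk.img,if=virtio \
        -drive file=seed.img,if=virtio,media=cdrom \
        -net nic,model=virtio -net user \
        -nographic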
=== added file 'overlay.sh'
--- overlay.sh 1970-01-01 00:00:00 +0000
+++ overlay.sh 2018-05-31 04:33:07 +0000
@@ -0,0 +1,23 @@
1#!/bin/bash
2# Overlays a new branch over this branch. This allows you to reuse code
3# from this branch against a development branch or a private branch.
4
5my_script_path=$(readlink -f ${0})
6my_s_dir=$(dirname ${my_script_path})
7
8source "${my_s_dir}/functions/bzr_check.sh"
9source "${my_s_dir}/functions/common"
10
11rsync_merge() {
12 debug "Merging ${1} with private"
13 [ -d ${1} ] || fail "no such directory ${1} for merging!"
14 rsync -av ${1}/* ${my_s_dir} ||
15 fail "failed to merge ${1}"
16}
17
18for i in ${@}
19do
20 check_tmp=$(mktemp --directory --tmpdir=${my_s_dir} bzrbranch.XXX)
21 check_branch ${i} ${check_tmp}
22 rsync_merge ${check_tmp}
23done
024
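A hedged usage sketch; the branch URL is a placeholder, and check_branch comes from functions/bzr_check.sh:

    # Illustrative only: overlay a private branch onto this checkout.
    ./overlay.sh lp:~example-user/+junk/private-overlay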
=== added directory 'pylib'
=== added directory 'pylib/changelogger'
=== added file 'pylib/changelogger.py'
--- pylib/changelogger.py 1970-01-01 00:00:00 +0000
+++ pylib/changelogger.py 2018-05-31 04:33:07 +0000
@@ -0,0 +1,222 @@
1from __future__ import print_function
2
3import logging
4import re
5import requests
6import subprocess
7from debian.changelog import (Changelog)
8
9# http://changelogs.ubuntu.com/changelogs/pool/main/l/linux-meta/linux-meta_4.2.0.17.19/changelog
10changelog_url_base = \
11 "http://changelogs.ubuntu.com/changelogs/pool/{}/{}/{}/{}_{}/changelog"
12
13
14class ChangeDelta(object):
15
16 def __init__(self, pkg, changelog=None):
17 self._pkg = pkg
18 self._changelogs = changelog
19
20 def _set_pkg(self, pkg):
21 if pkg is not None:
22 self._pkg = str(pkg)
23
24 def _get_pkg(self):
25 return self._pkg
26
27 pkg = property(_get_pkg, _set_pkg)
28
29 def _get_changelogs(self):
30 try:
31 return self._changelogs
32 except AttributeError:
33 return []
34
35 def _set_changelogs(self, changelogs):
36 self._changelogs = changelogs
37
38 changelogs = property(_get_changelogs, _set_changelogs)
39
40 def iter_changelogs(self, text=False):
41 for block in self.changelogs:
42 if text:
43 yield str(block)
44 else:
45 yield block
46
47 def _get_bug_cves(self):
48 """extract information from the changelog block"""
49 mappings, bugs, cves = ({}, [], [])
50 cve_re = re.compile(r"CVE-\d+-\d+")
51 bug_re = re.compile(r"\(LP:.#(\d+)\)")
52
53 for block in self.iter_changelogs():
54 _block = str(block)
55 cves_in_block = cve_re.findall(_block)
56 cves.extend(cves_in_block)
57
58 bugs_in_block = bug_re.findall(_block)
59 bugs.extend(bugs_in_block)
60
61 ver = str(block.version)
62 mappings[ver] = {'cves': cves_in_block,
63 'bugs': bugs_in_block}
64
65 self.mappings = mappings
66 self.cves = cves
67 self.bugs = bugs
68
69 def _get_cves(self):
70 self._get_bug_cves()
71 return self._cves
72
73 def _set_cves(self, cves):
74 self._cves = cves
75
76 cves = property(_get_cves, _set_cves)
77
78 def cve_in_delta(self):
79 if len(self.cves) > 0:
80 return True
81 return False
82
83 def _get_bugs(self):
84 self._get_bug_cves()
85 return self._bugs
86
87 def _set_bugs(self, bugs):
88 self._bugs = bugs
89
90 bugs = property(_get_bugs, _set_bugs)
91
92 def _get_min_version(self):
93 if self.changelogs:
94 return self.changelogs[-1].version
95
96 min_version = property(_get_min_version)
97
98 def _get_max_version(self):
99 if self.changelogs:
100 return self.changelogs[0].version
101
102 max_version = property(_get_max_version)
103
104 def _get_mappings(self):
105 try:
106 return self._mappings
107 except AttributeError:
108 return {}
109
110 def _set_mappings(self, mapping):
111 self._mappings = mapping
112
113 mappings = property(_get_mappings, _set_mappings)
114
115 def format_changelogs(self):
116 changeblocks = "\n".join(self.iter_changelogs(text=True))
117 return changeblocks
118
119 def __str__(self):
120 return self.format_changelogs()
121
122
123class ReadChangeLog(Changelog):
124
125 def __init__(self, pkg, version):
126 self.logger = logging.getLogger("__changelog_{}__".format(pkg))
127 logging.basicConfig(format=
128 '%(asctime)s %(levelname)s - [PARSING {}] %(message)s'.format(
129 pkg))
130 self.logger.setLevel(logging.DEBUG)
131 self.logger.debug("Parsing changelog for {}".format(version))
132
133 ch_url = self.get_changelog_url(pkg, version)
134 self.logger.debug("URL: {}".format(ch_url))
135 try:
136 raw_changelog = self.get_changelog_from_url(ch_url)
137 Changelog.__init__(self, raw_changelog)
138 except Exception as e:
139 self.logger.debug("Failed to parse changelog!\n{}".format(e))
140
141 self.min_version = self._blocks[-1].version
142 self.max_version = self.version
143
144 self.logger.debug("Opened changelog:")
145 self.logger.debug(" Versions {} through {}".format(self.min_version,
146 self.max_version))
147
148 def get_changelog_url(self, pkg, version, pocket='main', url=None):
149 """Return the changelog URL"""
150
151 url = url or changelog_url_base
152 pdir = pkg[0]
153 if pkg.startswith("lib"):
154 pdir = pkg[:4]
155 return url.format(pocket, pdir, pkg, pkg, version)
156
157 def get_changelog_from_url(self, url):
158 """Fetch the change log"""
159 try:
160 chlog = requests.get(url)
161 if chlog.status_code == requests.codes.ok:
162 return chlog.text
163 else:
164 chlog.raise_for_status()
165
166 except requests.exceptions.HTTPError as e:
167 self.logger.critical("Failed to fetch changelog at {}:\n{}".format(
168 url, e))
169
170 def compare_versions(self, v1, operator, v2):
171 """Dirty, slow hack to compare versions"""
172 cmd = ['/usr/bin/dpkg', '--compare-versions', str(v1), str(operator),
173 str(v2)]
174 try:
175 subprocess.check_call(cmd)
176 except subprocess.CalledProcessError as e:
177 return False
178
179 return True
180
181 def iter_changeblocks(self):
182 """Iterate over the change logs"""
183 for block in self._blocks:
184 yield block
185
186 def get_changes_between(self, minv=None, maxv=None, commits=None):
187 """Get the changes between two versions"""
188 blocks = []
189 # Don't waste CPU time if we are getting the whole log
190 if minv is None and maxv is None:
191 for block in self.iter_changeblocks():
192 blocks.append(block)
193
194 # Now deal with changes between
195 minver = minv or self.min_version
196 maxver = maxv or self.max_version
197
198 # Allow for comparing the latest version against arbitrary counts
199 # i.e. you don't have to know the prior version
200 if minver <= -1:
201 minver = self.versions[(abs(minver) - 1)]
202
203 if minv or maxv:
204 for block in self.iter_changeblocks():
205 bver = block.version
206 if minv:
207 if not self.compare_versions(bver, 'ge', minver):
208 continue
209 if not maxv:
210 blocks.append(block)
211 elif self.compare_versions(bver, 'le', maxver):
212 blocks.append(block)
213 elif maxv:
214 if not self.compare_versions(bver, 'le', maxver):
215 continue
216 if not minv:
217 blocks.append(block)
218 elif self.compare_versions(bver, 'ge', minver):
219 blocks.append(block)
220
221 ret = ChangeDelta(self.package, blocks)
222 return ret
0223
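compare_versions() above shells out to dpkg rather than re-implementing Debian version ordering; the underlying call (shown here with made-up versions) reports its result purely through its exit status:

    # Illustrative dpkg comparisons; version strings are examples.
    dpkg --compare-versions 4.2.0.17.19 ge 4.2.0.16.18 && echo "17.19 >= 16.18"
    dpkg --compare-versions 1.0-1ubuntu1 le 1.0-1ubuntu2 && echo "1ubuntu1 <= 1ubuntu2"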
=== added file 'pylib/changelogger/ChangeLogger.py'
--- pylib/changelogger/ChangeLogger.py 1970-01-01 00:00:00 +0000
+++ pylib/changelogger/ChangeLogger.py 2018-05-31 04:33:07 +0000
@@ -0,0 +1,222 @@
1from __future__ import print_function
2
3import logging
4import re
5import requests
6import subprocess
7from debian.changelog import (Changelog)
8
9# http://changelogs.ubuntu.com/changelogs/pool/main/l/linux-meta/linux-meta_4.2.0.17.19/changelog
10changelog_url_base = \
11 "http://changelogs.ubuntu.com/changelogs/pool/{}/{}/{}/{}_{}/changelog"
12
13
14class ChangeDelta(object):
15
16 def __init__(self, pkg, changelog=None):
17 self._pkg = pkg
18 self._changelogs = changelog
19
20 def _set_pkg(self, pkg):
21 if pkg is not None:
22 self._pkg = str(pkg)
23
24 def _get_pkg(self):
25 return self._pkg
26
27 pkg = property(_get_pkg, _set_pkg)
28
29 def _get_changelogs(self):
30 try:
31 return self._changelogs
32 except AttributeError:
33 return []
34
35 def _set_changelogs(self, changelogs):
36 self._changelogs = changelogs
37
38 changelogs = property(_get_changelogs, _set_changelogs)
39
40 def iter_changelogs(self, text=False):
41 for block in self.changelogs:
42 if text:
43 yield str(block)
44 else:
45 yield block
46
47 def _get_bug_cves(self):
48 """extract information from the changelog block"""
49 mappings, bugs, cves = ({}, [], [])
50 cve_re = re.compile(r"CVE-\d+-\d+")
51 bug_re = re.compile(r"\(LP:.#(\d+)\)")
52
53 for block in self.iter_changelogs():
54 _block = str(block)
55 cves_in_block = cve_re.findall(_block)
56 cves.extend(cves_in_block)
57
58 bugs_in_block = bug_re.findall(_block)
59 bugs.extend(bugs_in_block)
60
61 ver = str(block.version)
62 mappings[ver] = {'cves': cves_in_block,
63 'bugs': bugs_in_block}
64
65 self.mappings = mappings
66 self.cves = cves
67 self.bugs = bugs
68
69 def _get_cves(self):
70 self._get_bug_cves()
71 return self._cves
72
73 def _set_cves(self, cves):
74 self._cves = cves
75
76 cves = property(_get_cves, _set_cves)
77
78 def cve_in_delta(self):
79 if len(self.cves) > 0:
80 return True
81 return False
82
83 def _get_bugs(self):
84 self._get_bug_cves()
85 return self._bugs
86
87 def _set_bugs(self, bugs):
88 self._bugs = bugs
89
90 bugs = property(_get_bugs, _set_bugs)
91
92 def _get_min_version(self):
93 if self.changelogs:
94 return self.changelogs[-1].version
95
96 min_version = property(_get_min_version)
97
98 def _get_max_version(self):
99 if self.changelogs:
100 return self.changelogs[0].version
101
102 max_version = property(_get_max_version)
103
104 def _get_mappings(self):
105 try:
106 return self._mappings
107 except AttributeError:
108 return {}
109
110 def _set_mappings(self, mapping):
111 self._mappings = mapping
112
113 mappings = property(_get_mappings, _set_mappings)
114
115 def format_changelogs(self):
116 changeblocks = "\n".join(self.iter_changelogs(text=True))
117 return changeblocks
118
119 def __str__(self):
120 return self.format_changelogs()
121
122
123class ReadChangelog(Changelog):
124
125 def __init__(self, pkg, version):
126 self.logger = logging.getLogger("__changelog_{}__".format(pkg))
127 logging.basicConfig(format=
128 '%(asctime)s %(levelname)s - [PARSING {}] %(message)s'.format(
129 pkg))
130 self.logger.setLevel(logging.DEBUG)
131 self.logger.debug("Parsing changelog for {}".format(version))
132
133 ch_url = self.get_changelog_url(pkg, version)
134 self.logger.debug("URL: {}".format(ch_url))
135 try:
136 raw_changelog = self.get_changelog_from_url(ch_url)
137 Changelog.__init__(self, raw_changelog)
138 except Exception as e:
139 self.logger.debug("Failed to parse changelog!\n{}".format(e))
140
141 self.min_version = self._blocks[-1].version
142 self.max_version = self.version
143
144 self.logger.debug("Opened changelog:")
145 self.logger.debug(" Versions {} through {}".format(self.min_version,
146 self.max_version))
147
148 def get_changelog_url(self, pkg, version, pocket='main', url=None):
149 """Return the changelog URL"""
150
151 url = url or changelog_url_base
152 pdir = pkg[0]
153 if pkg.startswith("lib"):
154 pdir = pkg[:4]
155 return url.format(pocket, pdir, pkg, pkg, version)
156
157 def get_changelog_from_url(self, url):
158 """Fetch the change log"""
159 try:
160 chlog = requests.get(url)
161 if chlog.status_code == requests.codes.ok:
162 return chlog.text
163 else:
164 chlog.raise_for_status()
165
166 except requests.exceptions.HTTPError as e:
167 self.logger.critical("Failed to fetch changelog at {}:\n{}".format(
168 url, e))
169
170 def compare_versions(self, v1, operator, v2):
171 """Dirty, slow hack to compare versions"""
172 cmd = ['/usr/bin/dpkg', '--compare-versions', str(v1), str(operator),
173 str(v2)]
174 try:
175 subprocess.check_call(cmd)
176 except subprocess.CalledProcessError as e:
177 return False
178
179 return True
180
181 def iter_changeblocks(self):
182 """Iterate over the change logs"""
183 for block in self._blocks:
184 yield block
185
186 def get_changes_between(self, minv=None, maxv=None, commits=None):
187 """Get the changes between two versions"""
188 blocks = []
189 # Don't waste CPU time if we are getting the whole log
190 if minv is None and maxv is None:
191 for block in self.iter_changeblocks():
192 blocks.append(block)
193
194 # Now deal with changes between
195 minver = minv or self.min_version
196 maxver = maxv or self.max_version
197
198 # Allow for comparing the latest version against arbitrary counts
199 # i.e. you don't have to know the prior version
200 if minver <= -1:
201 minver = self.versions[(abs(minver) - 1)]
202
203 if minv or maxv:
204 for block in self.iter_changeblocks():
205 bver = block.version
206 if minv:
207 if not self.compare_versions(bver, 'ge', minver):
208 continue
209 if not maxv:
210 blocks.append(block)
211 elif self.compare_versions(bver, 'le', maxver):
212 blocks.append(block)
213 elif maxv:
214 if not self.compare_versions(bver, 'le', maxver):
215 continue
216 if not minv:
217 blocks.append(block)
218 elif self.compare_versions(bver, 'ge', minver):
219 blocks.append(block)
220
221 ret = ChangeDelta(self.package, blocks)
222 return ret
0223
=== added file 'pylib/changelogger/__init__.py'
=== added directory 'pylib/requests'
=== added file 'pylib/requests/__init__.py'
--- pylib/requests/__init__.py 1970-01-01 00:00:00 +0000
+++ pylib/requests/__init__.py 2018-05-31 04:33:07 +0000
@@ -0,0 +1,77 @@
1# -*- coding: utf-8 -*-
2
3# __
4# /__) _ _ _ _ _/ _
5# / ( (- (/ (/ (- _) / _)
6# /
7
8"""
9requests HTTP library
10~~~~~~~~~~~~~~~~~~~~~
11
12Requests is an HTTP library, written in Python, for human beings. Basic GET
13usage:
14
15 >>> import requests
16 >>> r = requests.get('http://python.org')
17 >>> r.status_code
18 200
19 >>> 'Python is a programming language' in r.content
20 True
21
22... or POST:
23
24 >>> payload = dict(key1='value1', key2='value2')
25 >>> r = requests.post("http://httpbin.org/post", data=payload)
26 >>> print(r.text)
27 {
28 ...
29 "form": {
30 "key2": "value2",
31 "key1": "value1"
32 },
33 ...
34 }
35
36The other HTTP methods are supported - see `requests.api`. Full documentation
37is at <http://python-requests.org>.
38
39:copyright: (c) 2014 by Kenneth Reitz.
40:license: Apache 2.0, see LICENSE for more details.
41
42"""
43
44__title__ = 'requests'
45__version__ = '2.3.0'
46__build__ = 0x020300
47__author__ = 'Kenneth Reitz'
48__license__ = 'Apache 2.0'
49__copyright__ = 'Copyright 2014 Kenneth Reitz'
50
51# Attempt to enable urllib3's SNI support, if possible
52try:
53 from .packages.urllib3.contrib import pyopenssl
54 pyopenssl.inject_into_urllib3()
55except ImportError:
56 pass
57
58from . import utils
59from .models import Request, Response, PreparedRequest
60from .api import request, get, head, post, patch, put, delete, options
61from .sessions import session, Session
62from .status_codes import codes
63from .exceptions import (
64 RequestException, Timeout, URLRequired,
65 TooManyRedirects, HTTPError, ConnectionError
66)
67
68# Set default logging handler to avoid "No handler found" warnings.
69import logging
70try: # Python 2.7+
71 from logging import NullHandler
72except ImportError:
73 class NullHandler(logging.Handler):
74 def emit(self, record):
75 pass
76
77logging.getLogger(__name__).addHandler(NullHandler())
078
=== added file 'pylib/requests/adapters.py'
--- pylib/requests/adapters.py 1970-01-01 00:00:00 +0000
+++ pylib/requests/adapters.py 2018-05-31 04:33:07 +0000
@@ -0,0 +1,388 @@
1# -*- coding: utf-8 -*-
2
3"""
4requests.adapters
5~~~~~~~~~~~~~~~~~
6
7This module contains the transport adapters that Requests uses to define
8and maintain connections.
9"""
10
11import socket
12
13from .models import Response
14from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
15from .packages.urllib3.response import HTTPResponse
16from .packages.urllib3.util import Timeout as TimeoutSauce
17from .compat import urlparse, basestring, urldefrag, unquote
18from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
19 prepend_scheme_if_needed, get_auth_from_url)
20from .structures import CaseInsensitiveDict
21from .packages.urllib3.exceptions import MaxRetryError
22from .packages.urllib3.exceptions import TimeoutError
23from .packages.urllib3.exceptions import SSLError as _SSLError
24from .packages.urllib3.exceptions import HTTPError as _HTTPError
25from .packages.urllib3.exceptions import ProxyError as _ProxyError
26from .cookies import extract_cookies_to_jar
27from .exceptions import ConnectionError, Timeout, SSLError, ProxyError
28from .auth import _basic_auth_str
29
30DEFAULT_POOLBLOCK = False
31DEFAULT_POOLSIZE = 10
32DEFAULT_RETRIES = 0
33
34
35class BaseAdapter(object):
36 """The Base Transport Adapter"""
37
38 def __init__(self):
39 super(BaseAdapter, self).__init__()
40
41 def send(self):
42 raise NotImplementedError
43
44 def close(self):
45 raise NotImplementedError
46
47
48class HTTPAdapter(BaseAdapter):
49 """The built-in HTTP Adapter for urllib3.
50
51 Provides a general-case interface for Requests sessions to contact HTTP and
52 HTTPS urls by implementing the Transport Adapter interface. This class will
53 usually be created by the :class:`Session <Session>` class under the
54 covers.
55
56 :param pool_connections: The number of urllib3 connection pools to cache.
57 :param pool_maxsize: The maximum number of connections to save in the pool.
58 :param int max_retries: The maximum number of retries each connection
59 should attempt. Note, this applies only to failed connections and
60 timeouts, never to requests where the server returns a response.
61 :param pool_block: Whether the connection pool should block for connections.
62
63 Usage::
64
65 >>> import requests
66 >>> s = requests.Session()
67 >>> a = requests.adapters.HTTPAdapter(max_retries=3)
68 >>> s.mount('http://', a)
69 """
70 __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
71 '_pool_block']
72
73 def __init__(self, pool_connections=DEFAULT_POOLSIZE,
74 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
75 pool_block=DEFAULT_POOLBLOCK):
76 self.max_retries = max_retries
77 self.config = {}
78 self.proxy_manager = {}
79
80 super(HTTPAdapter, self).__init__()
81
82 self._pool_connections = pool_connections
83 self._pool_maxsize = pool_maxsize
84 self._pool_block = pool_block
85
86 self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
87
88 def __getstate__(self):
89 return dict((attr, getattr(self, attr, None)) for attr in
90 self.__attrs__)
91
92 def __setstate__(self, state):
93 # Can't handle by adding 'proxy_manager' to self.__attrs__ because
94 # because self.poolmanager uses a lambda function, which isn't pickleable.
95 self.proxy_manager = {}
96 self.config = {}
97
98 for attr, value in state.items():
99 setattr(self, attr, value)
100
101 self.init_poolmanager(self._pool_connections, self._pool_maxsize,
102 block=self._pool_block)
103
104 def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK):
105 """Initializes a urllib3 PoolManager. This method should not be called
106 from user code, and is only exposed for use when subclassing the
107 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
108
109 :param connections: The number of urllib3 connection pools to cache.
110 :param maxsize: The maximum number of connections to save in the pool.
111 :param block: Block when no free connections are available.
112 """
113 # save these values for pickling
114 self._pool_connections = connections
115 self._pool_maxsize = maxsize
116 self._pool_block = block
117
118 self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
119 block=block)
120
121 def cert_verify(self, conn, url, verify, cert):
122 """Verify a SSL certificate. This method should not be called from user
123 code, and is only exposed for use when subclassing the
124 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
125
126 :param conn: The urllib3 connection object associated with the cert.
127 :param url: The requested URL.
128 :param verify: Whether we should actually verify the certificate.
129 :param cert: The SSL certificate to verify.
130 """
131 if url.lower().startswith('https') and verify:
132
133 cert_loc = None
134
135 # Allow self-specified cert location.
136 if verify is not True:
137 cert_loc = verify
138
139 if not cert_loc:
140 cert_loc = DEFAULT_CA_BUNDLE_PATH
141
142 if not cert_loc:
143 raise Exception("Could not find a suitable SSL CA certificate bundle.")
144
145 conn.cert_reqs = 'CERT_REQUIRED'
146 conn.ca_certs = cert_loc
147 else:
148 conn.cert_reqs = 'CERT_NONE'
149 conn.ca_certs = None
150
151 if cert:
152 if not isinstance(cert, basestring):
153 conn.cert_file = cert[0]
154 conn.key_file = cert[1]
155 else:
156 conn.cert_file = cert
157
158 def build_response(self, req, resp):
159 """Builds a :class:`Response <requests.Response>` object from a urllib3
160 response. This should not be called from user code, and is only exposed
161 for use when subclassing the
162 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
163
164 :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
165 :param resp: The urllib3 response object.
166 """
167 response = Response()
168
169 # Fallback to None if there's no status_code, for whatever reason.
170 response.status_code = getattr(resp, 'status', None)
171
172 # Make headers case-insensitive.
173 response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
174
175 # Set encoding.
176 response.encoding = get_encoding_from_headers(response.headers)
177 response.raw = resp
178 response.reason = response.raw.reason
179
180 if isinstance(req.url, bytes):
181 response.url = req.url.decode('utf-8')
182 else:
183 response.url = req.url
184
185 # Add new cookies from the server.
186 extract_cookies_to_jar(response.cookies, req, resp)
187
188 # Give the Response some context.
189 response.request = req
190 response.connection = self
191
192 return response
193
194 def get_connection(self, url, proxies=None):
195 """Returns a urllib3 connection for the given URL. This should not be
196 called from user code, and is only exposed for use when subclassing the
197 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
198
199 :param url: The URL to connect to.
200 :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
201 """
202 proxies = proxies or {}
203 proxy = proxies.get(urlparse(url.lower()).scheme)
204
205 if proxy:
206 proxy = prepend_scheme_if_needed(proxy, 'http')
207 proxy_headers = self.proxy_headers(proxy)
208
209 if not proxy in self.proxy_manager:
210 self.proxy_manager[proxy] = proxy_from_url(
211 proxy,
212 proxy_headers=proxy_headers,
213 num_pools=self._pool_connections,
214 maxsize=self._pool_maxsize,
215 block=self._pool_block)
216
217 conn = self.proxy_manager[proxy].connection_from_url(url)
218 else:
219 # Only scheme should be lower case
220 parsed = urlparse(url)
221 url = parsed.geturl()
222 conn = self.poolmanager.connection_from_url(url)
223
224 return conn
225
226 def close(self):
227 """Disposes of any internal state.
228
229 Currently, this just closes the PoolManager, which closes pooled
230 connections.
231 """
232 self.poolmanager.clear()
233
234 def request_url(self, request, proxies):
235 """Obtain the url to use when making the final request.
236
237 If the message is being sent through a HTTP proxy, the full URL has to
238 be used. Otherwise, we should only use the path portion of the URL.
239
240 This should not be called from user code, and is only exposed for use
241 when subclassing the
242 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
243
244 :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
245 :param proxies: A dictionary of schemes to proxy URLs.
246 """
247 proxies = proxies or {}
248 scheme = urlparse(request.url).scheme
249 proxy = proxies.get(scheme)
250
251 if proxy and scheme != 'https':
252 url, _ = urldefrag(request.url)
253 else:
254 url = request.path_url
255
256 return url
257
258 def add_headers(self, request, **kwargs):
259 """Add any headers needed by the connection. As of v2.0 this does
260 nothing by default, but is left for overriding by users that subclass
261 the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
262
263 This should not be called from user code, and is only exposed for use
264 when subclassing the
265 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
266
267 :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
268 :param kwargs: The keyword arguments from the call to send().
269 """
270 pass
271
272 def proxy_headers(self, proxy):
273 """Returns a dictionary of the headers to add to any request sent
274 through a proxy. This works with urllib3 magic to ensure that they are
275 correctly sent to the proxy, rather than in a tunnelled request if
276 CONNECT is being used.
277
278 This should not be called from user code, and is only exposed for use
279 when subclassing the
280 :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
281
282 :param proxies: The url of the proxy being used for this request.
283 :param kwargs: Optional additional keyword arguments.
284 """
285 headers = {}
286 username, password = get_auth_from_url(proxy)
287
288 if username and password:
289 headers['Proxy-Authorization'] = _basic_auth_str(username,
290 password)
291
292 return headers
293
294 def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
295 """Sends PreparedRequest object. Returns Response object.
296
297 :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
298 :param stream: (optional) Whether to stream the request content.
299 :param timeout: (optional) The timeout on the request.
300 :param verify: (optional) Whether to verify SSL certificates.
301 :param cert: (optional) Any user-provided SSL certificate to be trusted.
302 :param proxies: (optional) The proxies dictionary to apply to the request.
303 """
304
305 conn = self.get_connection(request.url, proxies)
306
307 self.cert_verify(conn, request.url, verify, cert)
308 url = self.request_url(request, proxies)
309 self.add_headers(request)
310
311 chunked = not (request.body is None or 'Content-Length' in request.headers)
312
313 timeout = TimeoutSauce(connect=timeout, read=timeout)
314
315 try:
316 if not chunked:
317 resp = conn.urlopen(
318 method=request.method,
319 url=url,
320 body=request.body,
321 headers=request.headers,
322 redirect=False,
323 assert_same_host=False,
324 preload_content=False,
325 decode_content=False,
326 retries=self.max_retries,
327 timeout=timeout
328 )
329
330 # Send the request.
331 else:
332 if hasattr(conn, 'proxy_pool'):
333 conn = conn.proxy_pool
334
335 low_conn = conn._get_conn(timeout=timeout)
336
337 try:
338 low_conn.putrequest(request.method,
339 url,
340 skip_accept_encoding=True)
341
342 for header, value in request.headers.items():
343 low_conn.putheader(header, value)
344
345 low_conn.endheaders()
346
347 for i in request.body:
348 low_conn.send(hex(len(i))[2:].encode('utf-8'))
349 low_conn.send(b'\r\n')
350 low_conn.send(i)
351 low_conn.send(b'\r\n')
352 low_conn.send(b'0\r\n\r\n')
353
354 r = low_conn.getresponse()
355 resp = HTTPResponse.from_httplib(
356 r,
357 pool=conn,
358 connection=low_conn,
359 preload_content=False,
360 decode_content=False
361 )
362 except:
363 # If we hit any problems here, clean up the connection.
364 # Then, reraise so that we can handle the actual exception.
365 low_conn.close()
366 raise
367 else:
368 # All is well, return the connection to the pool.
369 conn._put_conn(low_conn)
370
371 except socket.error as sockerr:
372 raise ConnectionError(sockerr, request=request)
373
374 except MaxRetryError as e:
375 raise ConnectionError(e, request=request)
376
377 except _ProxyError as e:
378 raise ProxyError(e)
379
380 except (_SSLError, _HTTPError) as e:
381 if isinstance(e, _SSLError):
382 raise SSLError(e, request=request)
383 elif isinstance(e, TimeoutError):
384 raise Timeout(e, request=request)
385 else:
386 raise
387
388 return self.build_response(request, resp)
0389
=== added file 'pylib/requests/api.py'
--- pylib/requests/api.py 1970-01-01 00:00:00 +0000
+++ pylib/requests/api.py 2018-05-31 04:33:07 +0000
@@ -0,0 +1,120 @@
1# -*- coding: utf-8 -*-
2
3"""
4requests.api
5~~~~~~~~~~~~
6
7This module implements the Requests API.
8
9:copyright: (c) 2012 by Kenneth Reitz.
10:license: Apache2, see LICENSE for more details.
11
12"""
13
14from . import sessions
15
16
17def request(method, url, **kwargs):
18 """Constructs and sends a :class:`Request <Request>`.
19 Returns :class:`Response <Response>` object.
20
21 :param method: method for the new :class:`Request` object.
22 :param url: URL for the new :class:`Request` object.
23 :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
24 :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
25 :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
26 :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
27 :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload.
28 :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
29 :param timeout: (optional) Float describing the timeout of the request in seconds.
30 :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
31 :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
32 :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
33 :param stream: (optional) if ``False``, the response content will be immediately downloaded.
34 :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
35
36 Usage::
37
38 >>> import requests
39 >>> req = requests.request('GET', 'http://httpbin.org/get')
40 <Response [200]>
41 """
42
43 session = sessions.Session()
44 return session.request(method=method, url=url, **kwargs)
45
46
47def get(url, **kwargs):
48 """Sends a GET request. Returns :class:`Response` object.
49
50 :param url: URL for the new :class:`Request` object.
51 :param \*\*kwargs: Optional arguments that ``request`` takes.
52 """
53
54 kwargs.setdefault('allow_redirects', True)
55 return request('get', url, **kwargs)
56
57
58def options(url, **kwargs):
59 """Sends a OPTIONS request. Returns :class:`Response` object.
60
61 :param url: URL for the new :class:`Request` object.
62 :param \*\*kwargs: Optional arguments that ``request`` takes.
63 """
64
65 kwargs.setdefault('allow_redirects', True)
66 return request('options', url, **kwargs)
67
68
69def head(url, **kwargs):
70 """Sends a HEAD request. Returns :class:`Response` object.
71
72 :param url: URL for the new :class:`Request` object.
73 :param \*\*kwargs: Optional arguments that ``request`` takes.
74 """
75
76 kwargs.setdefault('allow_redirects', False)
77 return request('head', url, **kwargs)
78
79
80def post(url, data=None, **kwargs):
81 """Sends a POST request. Returns :class:`Response` object.
82
83 :param url: URL for the new :class:`Request` object.
84 :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
85 :param \*\*kwargs: Optional arguments that ``request`` takes.
86 """
87
88 return request('post', url, data=data, **kwargs)
89
90
91def put(url, data=None, **kwargs):
92 """Sends a PUT request. Returns :class:`Response` object.
93
94 :param url: URL for the new :class:`Request` object.
95 :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
96 :param \*\*kwargs: Optional arguments that ``request`` takes.
97 """
98
99 return request('put', url, data=data, **kwargs)
100
101
102def patch(url, data=None, **kwargs):
103 """Sends a PATCH request. Returns :class:`Response` object.
104
105 :param url: URL for the new :class:`Request` object.
106 :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
107 :param \*\*kwargs: Optional arguments that ``request`` takes.
108 """
109
110 return request('patch', url, data=data, **kwargs)
111
112
113def delete(url, **kwargs):
114 """Sends a DELETE request. Returns :class:`Response` object.
115
116 :param url: URL for the new :class:`Request` object.
117 :param \*\*kwargs: Optional arguments that ``request`` takes.
118 """
119
120 return request('delete', url, **kwargs)
0121
=== added file 'pylib/requests/auth.py'
--- pylib/requests/auth.py 1970-01-01 00:00:00 +0000
+++ pylib/requests/auth.py 2018-05-31 04:33:07 +0000
@@ -0,0 +1,193 @@
1# -*- coding: utf-8 -*-
2
3"""
4requests.auth
5~~~~~~~~~~~~~
6
7This module contains the authentication handlers for Requests.
8"""
9
10import os
11import re
12import time
13import hashlib
14
15from base64 import b64encode
16
17from .compat import urlparse, str
18from .cookies import extract_cookies_to_jar
19from .utils import parse_dict_header
20
21CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
22CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
23
24
25def _basic_auth_str(username, password):
26 """Returns a Basic Auth string."""
27
28 return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
29
30
31class AuthBase(object):
32 """Base class that all auth implementations derive from"""
33
34 def __call__(self, r):
35 raise NotImplementedError('Auth hooks must be callable.')
36
37
38class HTTPBasicAuth(AuthBase):
39 """Attaches HTTP Basic Authentication to the given Request object."""
40 def __init__(self, username, password):
41 self.username = username
42 self.password = password
43
44 def __call__(self, r):
45 r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
46 return r
47
48
49class HTTPProxyAuth(HTTPBasicAuth):
50 """Attaches HTTP Proxy Authentication to a given Request object."""
51 def __call__(self, r):
52 r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
53 return r
54
55
56class HTTPDigestAuth(AuthBase):
57 """Attaches HTTP Digest Authentication to the given Request object."""
58 def __init__(self, username, password):
59 self.username = username
60 self.password = password
61 self.last_nonce = ''
62 self.nonce_count = 0
63 self.chal = {}
64 self.pos = None
65
66 def build_digest_header(self, method, url):
67
68 realm = self.chal['realm']
69 nonce = self.chal['nonce']
70 qop = self.chal.get('qop')
71 algorithm = self.chal.get('algorithm')
72 opaque = self.chal.get('opaque')
73
74 if algorithm is None:
75 _algorithm = 'MD5'
76 else:
77 _algorithm = algorithm.upper()
78 # lambdas assume digest modules are imported at the top level
79 if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
80 def md5_utf8(x):
81 if isinstance(x, str):
82 x = x.encode('utf-8')
83 return hashlib.md5(x).hexdigest()
84 hash_utf8 = md5_utf8
85 elif _algorithm == 'SHA':
86 def sha_utf8(x):
87 if isinstance(x, str):
88 x = x.encode('utf-8')
89 return hashlib.sha1(x).hexdigest()
90 hash_utf8 = sha_utf8
91
92 KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
93
94 if hash_utf8 is None:
95 return None
96
97 # XXX not implemented yet
98 entdig = None
99 p_parsed = urlparse(url)
100 path = p_parsed.path
101 if p_parsed.query:
102 path += '?' + p_parsed.query
103
104 A1 = '%s:%s:%s' % (self.username, realm, self.password)
105 A2 = '%s:%s' % (method, path)
106
107 HA1 = hash_utf8(A1)
108 HA2 = hash_utf8(A2)
109
110 if nonce == self.last_nonce:
111 self.nonce_count += 1
112 else:
113 self.nonce_count = 1
114 ncvalue = '%08x' % self.nonce_count
115 s = str(self.nonce_count).encode('utf-8')
116 s += nonce.encode('utf-8')
117 s += time.ctime().encode('utf-8')
118 s += os.urandom(8)
119
120 cnonce = (hashlib.sha1(s).hexdigest()[:16])
121 noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
122 if _algorithm == 'MD5-SESS':
123 HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
124
125 if qop is None:
126 respdig = KD(HA1, "%s:%s" % (nonce, HA2))
127 elif qop == 'auth' or 'auth' in qop.split(','):
128 respdig = KD(HA1, noncebit)
129 else:
130 # XXX handle auth-int.
131 return None
132
133 self.last_nonce = nonce
134
135 # XXX should the partial digests be encoded too?
136 base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
137 'response="%s"' % (self.username, realm, nonce, path, respdig)
138 if opaque:
139 base += ', opaque="%s"' % opaque
140 if algorithm:
141 base += ', algorithm="%s"' % algorithm
142 if entdig:
143 base += ', digest="%s"' % entdig
144 if qop:
145 base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
146
147 return 'Digest %s' % (base)
148
149 def handle_401(self, r, **kwargs):
150 """Takes the given response and tries digest-auth, if needed."""
151
152 if self.pos is not None:
153 # Rewind the file position indicator of the body to where
154 # it was to resend the request.
155 r.request.body.seek(self.pos)
156 num_401_calls = getattr(self, 'num_401_calls', 1)
157 s_auth = r.headers.get('www-authenticate', '')
158
159 if 'digest' in s_auth.lower() and num_401_calls < 2:
160
161 setattr(self, 'num_401_calls', num_401_calls + 1)
162 pat = re.compile(r'digest ', flags=re.IGNORECASE)
163 self.chal = parse_dict_header(pat.sub('', s_auth, count=1))
164
165 # Consume content and release the original connection
166 # to allow our new request to reuse the same one.
167 r.content
168 r.raw.release_conn()
169 prep = r.request.copy()
170 extract_cookies_to_jar(prep._cookies, r.request, r.raw)
171 prep.prepare_cookies(prep._cookies)
172
173 prep.headers['Authorization'] = self.build_digest_header(
174 prep.method, prep.url)
175 _r = r.connection.send(prep, **kwargs)
176 _r.history.append(r)
177 _r.request = prep
178
179 return _r
180
181 setattr(self, 'num_401_calls', 1)
182 return r
183
184 def __call__(self, r):
185 # If we have a saved nonce, skip the 401
186 if self.last_nonce:
187 r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
188 try:
189 self.pos = r.body.tell()
190 except AttributeError:
191 pass
192 r.register_hook('response', self.handle_401)
193 return r
0194
=== added file 'pylib/requests/cacert.pem'
--- pylib/requests/cacert.pem 1970-01-01 00:00:00 +0000
+++ pylib/requests/cacert.pem 2018-05-31 04:33:07 +0000
@@ -0,0 +1,5026 @@
1# This Source Code Form is subject to the terms of the Mozilla Public
2# License, v. 2.0. If a copy of the MPL was not distributed with this
3# file, You can obtain one at http://mozilla.org/MPL/2.0/.
4
5# Issuer: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
6# Subject: CN=GTE CyberTrust Global Root O=GTE Corporation OU=GTE CyberTrust Solutions, Inc.
7# Label: "GTE CyberTrust Global Root"
8# Serial: 421
9# MD5 Fingerprint: ca:3d:d3:68:f1:03:5c:d0:32:fa:b8:2b:59:e8:5a:db
10# SHA1 Fingerprint: 97:81:79:50:d8:1c:96:70:cc:34:d8:09:cf:79:44:31:36:7e:f4:74
11# SHA256 Fingerprint: a5:31:25:18:8d:21:10:aa:96:4b:02:c7:b7:c6:da:32:03:17:08:94:e5:fb:71:ff:fb:66:67:d5:e6:81:0a:36
12-----BEGIN CERTIFICATE-----
13MIICWjCCAcMCAgGlMA0GCSqGSIb3DQEBBAUAMHUxCzAJBgNVBAYTAlVTMRgwFgYD
14VQQKEw9HVEUgQ29ycG9yYXRpb24xJzAlBgNVBAsTHkdURSBDeWJlclRydXN0IFNv
15bHV0aW9ucywgSW5jLjEjMCEGA1UEAxMaR1RFIEN5YmVyVHJ1c3QgR2xvYmFsIFJv
16b3QwHhcNOTgwODEzMDAyOTAwWhcNMTgwODEzMjM1OTAwWjB1MQswCQYDVQQGEwJV
17UzEYMBYGA1UEChMPR1RFIENvcnBvcmF0aW9uMScwJQYDVQQLEx5HVEUgQ3liZXJU
18cnVzdCBTb2x1dGlvbnMsIEluYy4xIzAhBgNVBAMTGkdURSBDeWJlclRydXN0IEds
19b2JhbCBSb290MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCVD6C28FCc6HrH
20iM3dFw4usJTQGz0O9pTAipTHBsiQl8i4ZBp6fmw8U+E3KHNgf7KXUwefU/ltWJTS
21r41tiGeA5u2ylc9yMcqlHHK6XALnZELn+aks1joNrI1CqiQBOeacPwGFVw1Yh0X4
2204Wqk2kmhXBIgD8SFcd5tB8FLztimQIDAQABMA0GCSqGSIb3DQEBBAUAA4GBAG3r
23GwnpXtlR22ciYaQqPEh346B8pt5zohQDhT37qw4wxYMWM4ETCJ57NE7fQMh017l9
243PR2VX2bY1QY6fDq81yx2YtCHrnAlU66+tXifPVoYb+O7AWXX1uw16OFNMQkpw0P
25lZPvy5TYnh+dXIVtx6quTx8itc2VrbqnzPmrC3p/
26-----END CERTIFICATE-----
27
28# Issuer: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
29# Subject: CN=Thawte Server CA O=Thawte Consulting cc OU=Certification Services Division
30# Label: "Thawte Server CA"
31# Serial: 1
32# MD5 Fingerprint: c5:70:c4:a2:ed:53:78:0c:c8:10:53:81:64:cb:d0:1d
33# SHA1 Fingerprint: 23:e5:94:94:51:95:f2:41:48:03:b4:d5:64:d2:a3:a3:f5:d8:8b:8c
34# SHA256 Fingerprint: b4:41:0b:73:e2:e6:ea:ca:47:fb:c4:2f:8f:a4:01:8a:f4:38:1d:c5:4c:fa:a8:44:50:46:1e:ed:09:45:4d:e9
35-----BEGIN CERTIFICATE-----
36MIIDEzCCAnygAwIBAgIBATANBgkqhkiG9w0BAQQFADCBxDELMAkGA1UEBhMCWkEx
37FTATBgNVBAgTDFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYD
38VQQKExRUaGF3dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlv
39biBTZXJ2aWNlcyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEm
40MCQGCSqGSIb3DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wHhcNOTYwODAx
41MDAwMDAwWhcNMjAxMjMxMjM1OTU5WjCBxDELMAkGA1UEBhMCWkExFTATBgNVBAgT
42DFdlc3Rlcm4gQ2FwZTESMBAGA1UEBxMJQ2FwZSBUb3duMR0wGwYDVQQKExRUaGF3
43dGUgQ29uc3VsdGluZyBjYzEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNl
44cyBEaXZpc2lvbjEZMBcGA1UEAxMQVGhhd3RlIFNlcnZlciBDQTEmMCQGCSqGSIb3
45DQEJARYXc2VydmVyLWNlcnRzQHRoYXd0ZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQAD
46gY0AMIGJAoGBANOkUG7I/1Zr5s9dtuoMaHVHoqrC2oQl/Kj0R1HahbUgdJSGHg91
47yekIYfUGbTBuFRkC6VLAYttNmZ7iagxEOM3+vuNkCXDF/rFrKbYvScg71CcEJRCX
The diff has been truncated for viewing.
