=== added file 'bin/kermit'
--- bin/kermit 1970-01-01 00:00:00 +0000
+++ bin/kermit 2014-05-09 09:53:50 +0000
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+import os
+import pwd
+import sys
+import json
+import urllib
+import urllib2
+
+from argparse import ArgumentParser
+
def main():
    """Submit a commit to the kermit queue server and report the outcome."""
    parser = ArgumentParser()
    parser.add_argument("-c", "--commitdiff", required=True)
    parser.add_argument("-t", "--target", required=True)
    parser.add_argument("-b", "--buglist", required=True)
    parser.add_argument("-u", "--username",
                        default=pwd.getpwuid(os.getuid())[0])
    # --address falls back to $KERMIT_URL; it is only mandatory on the
    # command line when that environment variable is absent.
    parser.add_argument("-a", "--address",
                        required="KERMIT_URL" not in os.environ,
                        default=os.environ.get("KERMIT_URL", None))

    args = parser.parse_args()

    form = urllib.urlencode({
        'username': args.username,
        'commitdiff': args.commitdiff,
        'target': args.target,
        'buglist': args.buglist,
        'commitmsg': '',
    })

    request = urllib2.Request(args.address + '/kermit/submit', form)
    try:
        response = urllib2.urlopen(request)
    except urllib2.HTTPError as err:
        # The server reports rejection details as JSON in the error body.
        body = json.loads(err.read())
        sys.stderr.write("Failed to submit commit to queue: {}\n"
                         .format(body['fail']))
        sys.exit(1)
    else:
        body = json.loads(response.read())
        sys.stdout.write("Submitted commit to queue as {}\n"
                         .format(body['success']))
+
+if __name__ == "__main__":
+ main()
=== added file 'bin/kermit_queue_runner'
--- bin/kermit_queue_runner 1970-01-01 00:00:00 +0000
+++ bin/kermit_queue_runner 2014-05-09 09:53:50 +0000
@@ -0,0 +1,1095 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
import collections
import json
import logging
import os
import os.path
import pwd
import shlex
import shutil
import time
import traceback

import cfgparser

from argparse import ArgumentParser
from StringIO import StringIO

from twisted.python import log
from twisted.python.failure import Failure
from twisted.spread import pb
from twisted.internet import reactor, protocol, defer, task, error
from twisted.protocols import basic
from twisted.web.client import FileBodyProducer, Agent
from twisted.web.http_headers import Headers
+
+
+_CFG_DEFAULTS = {
+ 'filevars': '',
+ 'attempts': '1',
+ 'attempt_interval': '10',
+ 'persist': 'no',
+ 'environment': '',
+ 'dependencies': '',
+}
+
+"""
+Utilities and types.
+
+"""
+
+_StageSpec = collections.namedtuple("_StageSpec", ("deps", "done"))
+
+class AbortedError(Exception): pass
+
+
def makedirs(dirs):
    """
    Create the directory tree `dirs`, best-effort.

    Any OSError (most commonly "already exists") is deliberately
    swallowed; callers treat directory creation as idempotent.

    """
    try:
        os.makedirs(dirs)
    except OSError:
        # Intentionally ignored: creation is best-effort/idempotent.
        pass
+
+
+def _makesockname(dirname):
+ "Make the socket name for the inter-qr comms."
+ return "\0kermit#{}#status".format(dirname)
+
+
def _send_update(qr, key, status):
    """
    Send a status update for job `key` to the kermit server.

    The status dict is first persisted to the job's .kermit/progress file
    (so it survives a queue-runner restart), then POSTed as JSON to the
    kermit server at $KERMIT_URL.

    Returns a deferred that fires with (response, list of body chunks)
    once the HTTP response body has been fully received.

    """
    logging.debug("Updating {} (running: {})"
                  .format(key, status.get('running', [])))
    # NOTE(review): assumes the statedir already exists (it is created by
    # QueueRunner._check_checksum during _prepare) — confirm for all callers.
    statedir = os.path.join(qr.dir, "logs", key, ".kermit")
    with open(os.path.join(statedir, "progress"), "w") as file:
        file.write(json.dumps(status))
    d = Agent(reactor).request(
        "POST",
        os.environ["KERMIT_URL"] + "/kermit/update/{}".format(key),
        Headers({'Content-Type': ['text/json']}),
        FileBodyProducer(StringIO(json.dumps(status)))
    )
    # Collects the response body and fires `finished` with it on disconnect.
    class BodyReceiver(protocol.Protocol):
        def __init__(self, response, finished):
            self.finished = finished
            self.response = response
            self.data = []
        def dataReceived(self, bytes):
            self.data.append(bytes)
        def connectionLost(self, reason):
            # Clear before firing so the deferred can't fire twice.
            d, self.finished = self.finished, None
            d.callback((self.response, self.data))
    def _handle(response):
        finished = defer.Deferred()
        response.deliverBody(BodyReceiver(response, finished))
        return finished
    d.addCallback(_handle)
    return d
+
+
+"""
+Command output logging between processes.
+
+Classes:
+ - _LoggingProcess: base class for processes that log their output.
+ - _LocalLoggingProcess: logs output to local files (used in primary ws).
+ - _RemoteLoggingProcess: logs output over the LogfileProtocol (used in shared
+ workspaces).
+ - _RemoteLogfileProtocol: the remote (i.e. sending) side of the
+ LogfileProtocol.
+ - _RemoteLogfileFactory: ClientFactory subclass for opening remote logfiles.
+ - _RemoteLogfile: wrapper around the transport to look file-like.
+ - _LocalLogfileProtocol: the local (i.e. receiving) side of the
+ LogfileProtocol.
+ - _LocalLogfileFactory: server Factory for opening remote logfiles.
+
+"""
+
class _LoggingProcess(protocol.ProcessProtocol):
    """
    Wraps processes so that their output is logged to files.

    Maintains two files:
      - errlog: contains just the stderr content
      - alllog: contains all output (stdout and stderr interleaved)

    Provides two deferreds:
      - ready: should be fired by subclasses when the log files are ready.
        When spawning, the actual spawn should be predicated on this
        deferred.
      - deferred: fires with None when the process exits 0, or errbacks
        with the failure reason otherwise.  Callers should use this field
        to receive the result.

    """
    def __init__(self, name, logdir, key):
        self.name = name
        self.logdir = logdir
        # Subclasses are responsible for opening these two log targets.
        self.alllog = None
        self.errlog = None
        self.ready = defer.Deferred()
        self.deferred = defer.Deferred()
    def outReceived(self, data):
        # stdout goes to the combined log only.
        self.alllog.write(data)
    def errReceived(self, data):
        # stderr goes to both the combined and the error-only log.
        self.alllog.write(data)
        self.errlog.write(data)
    def processEnded(self, reason):
        self.alllog.close()
        self.errlog.close()
        # Clear self.deferred before firing so it cannot be fired twice.
        self.deferred, d = None, self.deferred
        logging.debug("Process {} complete: {}"
                      .format(self.name, reason.value.exitCode))
        if reason.value.exitCode == 0:
            d.callback(None)
        else:
            d.errback(reason)
+
class _LocalLoggingProcess(_LoggingProcess):
    """
    Logs to local files in the log directory.

    alllog is given a .log suffix; errlog is given a .err.log suffix. There
    are no async events required to set up the logs, so this fires the
    ready callback on initialisation.

    """
    def __init__(self, name, logdir, key):
        _LoggingProcess.__init__(self, name, logdir, key)
        makedirs(logdir)
        self.alllog = open(os.path.join(logdir, name + ".log"), "w")
        self.errlog = open(os.path.join(logdir, name + ".err.log"), "w")
        # Files are open synchronously, so we're ready immediately.
        self.ready.callback(None)
+
class _RemoteLoggingProcess(_LoggingProcess):
    """
    Logs to a remote file using a _RemoteLogfileFactory, requesting two files.

    The first requested file is the alllog; the second is errlog. The ready
    deferred fires once the two files are open.  If the remote log server
    can't be reached, falls back to local files under "logs/".

    """
    def __init__(self, name, logdir, key):
        _LoggingProcess.__init__(self, name, logdir, key)
        f = _RemoteLogfileFactory(self.add_log_file, self._use_local)
        logsockpath = "\0kermit#{}#logs".format(key)
        # Two deliberate connections: one per logfile (alllog, then errlog).
        reactor.connectUNIX(logsockpath, f)
        reactor.connectUNIX(logsockpath, f)

    def _use_local(self, failure):
        # Errback: remote log server refused the connection, so open a
        # plain local file for whichever log is still unassigned.
        failure.trap(error.ConnectionRefusedError)
        logging.warning("Couldn't log remotely. Logging locally instead")
        if self.alllog is None:
            self.alllog = open(os.path.join("logs", self.name + ".log"), "w")
        elif self.errlog is None:
            self.errlog = open(os.path.join("logs",
                                            self.name + ".err.log"), "w")
            # Both logs now exist, so the process may be spawned.
            d, self.ready = self.ready, None
            d.callback(None)
        else:
            raise ValueError("Didn't expect another remote logfile")

    def add_log_file(self, proto):
        # Called once per successful connection; logs are assigned in
        # order: alllog first, then errlog.
        if self.alllog is None:
            proto.sendLine(self.name + ".log")
            self.alllog = _RemoteLogfile(proto.transport)
        elif self.errlog is None:
            proto.sendLine(self.name + ".err.log")
            self.errlog = _RemoteLogfile(proto.transport)
            # Both logs now exist, so the process may be spawned.
            d, self.ready = self.ready, None
            d.callback(None)
        else:
            raise ValueError("Didn't expect another remote logfile")
+
+
class _RemoteLogfileProtocol(basic.LineReceiver):
    """
    Sending side of the logfile protocol.

    On connection, hands itself to the factory's add_log_file callback so
    the factory's owner can name the logfile and start streaming data.

    """
    def connectionMade(self):
        try:
            self.factory.add_log_file(self)
        except Exception:
            # The factory's errback expects a twisted Failure (e.g.
            # _RemoteLoggingProcess._use_local calls .trap() on it), so
            # wrap the in-flight exception rather than passing the bare
            # exception object, which has no .trap attribute.
            self.factory.clientConnectionFailed(None, Failure())
+
class _RemoteLogfileFactory(protocol.ClientFactory):
    """
    ClientFactory for opening remote logfiles.

    Each new connection is handed to `callback` so the owner can name the
    logfile and write to it; connection failures are reported to `errback`.

    """
    protocol = _RemoteLogfileProtocol
    def __init__(self, callback, errback):
        self.add_log_file = callback
        self.errback = errback
    def clientConnectionFailed(self, _, reason):
        logging.debug("Failed to connect to the logfile: {}".format(reason))
        # NOTE(review): two connections are opened per process, so the
        # errback may be invoked after the first file already succeeded;
        # the errback (e.g. _use_local) is expected to cope with that.
        self.errback(reason)
+
+class _RemoteLogfile(object):
+ """
+ Wrapper around _RemoteLogfileProtocol transport to make it file-like.
+ """
+ def __init__(self, transport):
+ self._transport = transport
+ def write(self, data):
+ self._transport.write(data)
+ def close(self):
+ self._transport.loseConnection()
+
class _LocalLogfileProtocol(basic.LineReceiver):
    """
    Receiving side of the logfile protocol.

    The first line received names the logfile (opened under the owning
    operation's configured logdir); all subsequent raw data is written to
    it verbatim until the connection closes.

    """
    def lineReceived(self, line):
        logging.debug("Opening a log file: {}".format(line))
        self._file = open(os.path.join(self.factory.op._cfg.get("DEFAULT",
                                                                "logdir"),
                                       line.strip()), "w")
        # Everything after the filename line is raw file content.
        self.setRawMode()
    def rawDataReceived(self, data):
        self._file.write(data)
    def connectionLost(self, reason):
        self._file.close()
+
class _LocalLogfileFactory(protocol.Factory):
    """Server factory producing _LocalLogfileProtocols bound to operation `op`."""
    protocol = _LocalLogfileProtocol
    def __init__(self, op):
        # The operation supplies the logdir config for received files.
        self.op = op
+
+
+"""
+Operation objects and helper classes.
+
+"""
+
+class _RemoteResults(pb.Referenceable):
+ """
+ Allows a shared operation to communicate back to the local operation that
+ spawned it.
+
+ """
+ def __init__(self, op, stages):
+ self._op = op
+ self._stages = set(stages)
+
+ def remote_running(self, stage):
+ logging.debug("Our partner tells us {} has started running"
+ .format(stage))
+ self._op._status['running'].append(stage)
+ self._op._send_update()
+
+ def remote_abort(self, stage):
+ if stage in self._op._status['running']:
+ self._op._status['running'].remove(stage)
+ if stage in self._stages:
+ logging.debug("Our partner tells us {} has aborted".format(stage))
+ self._stages.remove(stage)
+ self._op._not_doing(stage)
+ self._op._send_update()
+
+ def remote_done(self, stage):
+ if stage in self._op._status['running']:
+ self._op._status['running'].remove(stage)
+ if stage in self._stages:
+ logging.debug("Our partner tells us {} has succeeded".format(stage))
+ self._stages.remove(stage)
+ self._op._done(stage)
+ self._op._send_update()
+
+ def remote_fail(self, stage, reason):
+ if stage in self._op._status['running']:
+ self._op._status['running'].remove(stage)
+ logging.debug("Our partner tells us {} failed: {}"
+ .format(stage, reason))
+ if stage in self._stages:
+ self._stages.remove(stage)
+ self._op._fail(stage, reason)
+ self._op._send_update()
+
+
class _OperationBase(object):
    """
    Base class for operations.

    An operation represents one queued commit job for a given target,
    parameterised by its parsed config file.  Subclasses decide where
    stages run (_LocalOperation for the primary workspace,
    _SharedOperation for shared workspaces).

    """
    def __init__(self, qr, cfgfile, cfg, target, section, status, ws=None):
        self._qr = qr
        self._key = cfg.get("DEFAULT", "key")
        self._cfgfile = cfgfile
        self._cfg = cfg
        self._target = target
        self._section = section
        self._status = status
        self._ws = ws

        # Reset the status fields this run owns.  'done'/'done_at' are
        # preserved if already present, so previously completed stages can
        # be skipped for re-runs.
        self._status['started_at'] = 0
        self._status['completed_at'] = 0
        self._status['stages'] = []
        self._status.setdefault('done', [])
        self._status.setdefault('done_at', {})
        self._status['failed'] = []
        self._status['failed_at'] = {}
        self._status['failed_cleanup'] = False
        self._status['check_failed'] = False
        self._status['skipping'] = []
        self._status['running'] = []
        self._status['logdir'] = cfg.get("DEFAULT", "logdir")

        # Option precedence: DEFAULT < target section < workspace section.
        opts = dict(cfg.items("DEFAULT"))
        opts.update(dict(cfg.items(target)))
        if self._ws:
            opts.update(dict(cfg.items(self._ws, category="workspace")))
        self._status['message'] = opts.get("message", "").format(**opts)

        self._opts = opts
        # Currently running (abortable) child process, if any.
        self._active_proc = None
+
+ def _send_update(self):
+ return None
+
+ def begin(self):
+ pass
+
+ def complete(self):
+ pass
+
    @defer.inlineCallbacks
    def _run_stage(self, name, suppress_fail=False, suppress_done=False):
        """
        Run a single named command stage, retrying as configured.

        The stage's 'attempts' and 'attempt_interval' config values control
        how many times the command is tried and the delay between tries.

        :param suppress_fail: don't report failure via _fail() (the
            exception is still re-raised to the caller).
        :param suppress_done: don't report success via _done().

        """
        logging.debug("Running stage {}".format(name))
        attempts = self._cfg.geteval(name, "attempts", category="command")
        interval = self._cfg.geteval(name, "attempt_interval",
                                     category="command")
        self._status['running'].append(name)
        if not suppress_fail and not suppress_done:
            yield self._running(name)
        yield self._send_update()
        try:
            # Perform multiple attempts if requested
            while True:
                try:
                    yield self._spawn_cmd(name)
                except Exception:
                    attempts -= 1
                    if attempts <= 0:
                        raise
                    # Sleep a bit between attempts
                    logging.debug("Waiting to retry stage...")
                    yield task.deferLater(reactor, interval, lambda: None)
                else:
                    break
            # Also need to track errors in saving the logs
            yield self._save_logs(name)
        except Exception as e:
            logging.exception(e)
            if not suppress_fail:
                yield self._fail(name, e)
            raise e
        else:
            if not suppress_done:
                yield self._done(name)
        finally:
            # Always drop the stage from 'running' and push the update.
            self._status['running'].remove(name)
            yield self._send_update()
+
    def _spawn_cmd(self, name):
        """
        Spawn the command for stage `name` as a logged child process.

        Builds the command line, environment and working directory from the
        stage's config section (with {var} substitution from the merged
        option dict), then spawns it under this operation's logging
        process protocol.

        Returns the protocol's deferred, firing when the process exits
        (errback on non-zero exit).

        """
        section = self._cfg.section(name, category="command")
        p = self.logger(name, self._cfg.get("DEFAULT", "logdir"), self._key)
        myopts = self._opts.copy()

        myopts.update(dict(section.items()))

        # Load any files we need into variables
        for key, filename in [e.split(':')
                              for e in section.getlist('filevars')]:
            filename = filename.strip().format(**myopts)
            with open(filename.strip(), 'r') as file:
                myopts[key] = file.read()

        env = os.environ.copy()
        for key, val in [e.split(':')
                         for e in section.getlist('environment')]:
            env[key.strip()] = val.strip().format(**myopts)

        args = map(lambda s: s.format(**myopts),
                   shlex.split(section.get("command")))

        # NOTE(review): this local shadows the module-level `pwd` import;
        # harmless here as the module is not used in this function.
        pwd = section.get("pwd").format(**myopts)

        with open(os.path.join(self._qr.dir, "logs",
                               'commands.log'), 'a') as file:
            file.write(str(args) + '\n')

        def _spawn_proc(_):
            logging.debug("Spawning: {}".format(args))
            proc = reactor.spawnProcess(p, args[0], args,
                                        env=env,
                                        path=os.path.join(self._qr.dir, pwd),
                                        usePTY=True)
            # Only track the active process if it's one of the stages we
            # actually want to be able to abort. We don't want to abort pre
            # cmds as that will result in aborting a workspace.
            if name in self._stages:
                self._active_proc = proc
        p.ready.addCallbacks(_spawn_proc, p.deferred.errback)
        def _clean_proc(_):
            self._active_proc = None
            return _
        p.deferred.addBoth(_clean_proc)
        return p.deferred
+
+ def _load_ws_state(self, ws):
+ statefile = os.path.join(self._qr.dir, ".kermit", ws + ".ws")
+ makedirs(os.path.dirname(statefile))
+ state = {}
+ if os.path.exists(statefile):
+ with open(statefile) as file:
+ state = json.loads(file.read())
+ return state
+
+ def _save_ws_state(self, ws, state):
+ statefile = os.path.join(self._qr.dir, ".kermit", ws + ".ws")
+ makedirs(os.path.dirname(statefile))
+ with open(statefile, "w") as file:
+ file.write(json.dumps(state))
+
+ @defer.inlineCallbacks
+ def setup_workspace(self, ws):
+ state = self._load_ws_state(ws)
+ if not state.get("setup", False):
+ for name in self._section.getlist("setup_cmds"):
+ yield self._run_stage(name, suppress_done=True)
+ else:
+ state["setup"] = True
+ self._save_ws_state(ws, state)
+
    @defer.inlineCallbacks
    def abort_workspace(self, ws):
        """
        Run the workspace's abort_cmds and mark it as needing setup again.

        Failures are logged and recorded in status['failed_abort'] but are
        never propagated to the caller.

        """
        for name in self._section.getlist("abort_cmds"):
            try:
                yield self._run_stage(name, suppress_fail=True,
                                      suppress_done=True)
            except Exception as e:
                logging.debug("Failed to abort workspace")
                logging.exception(e)
                self._status['failed_abort'] = True
        # Force setup_cmds to re-run the next time this workspace is used.
        self._save_ws_state(ws, {"setup": False})
+
    @defer.inlineCallbacks
    def run_pre_cmds(self, suppress_fail=False, suppress_done=False):
        """
        Run the target's pre_cmds in order, then stamp the start time.

        Note: the for/else contains no break, so the else clause (setting
        started_at and pushing an update) always runs once every pre_cmd
        has completed successfully.

        """
        for name in self._section.getlist("pre_cmds"):
            yield self._run_stage(name, suppress_fail=suppress_fail,
                                  suppress_done=suppress_done)
        else:
            self._status['started_at'] = time.time()
            yield self._send_update()
+
    @defer.inlineCallbacks
    def run_post_cmds(self):
        """
        Run the target's post_cmds (cleanup), swallowing any failures.

        A failure is recorded in status['failed_cleanup'] but does not stop
        the remaining post commands or propagate to the caller.

        """
        for name in self._section.getlist("post_cmds"):
            try:
                yield self._run_stage(name, suppress_fail=True,
                                      suppress_done=True)
            except Exception:
                logging.debug("Failed to run post cmds")
                traceback.print_exc()
                self._status['failed_cleanup'] = True
+
+
+class _LocalOperation(_OperationBase):
+ logger = _LocalLoggingProcess
+ def __init__(self, qr, cfgfile, cfg, target, section, status):
+ _OperationBase.__init__(self, qr, cfgfile, cfg, target, section, status)
+ self._logconn = None
+ self._calc_stages()
+
+ def _calc_stages(self):
+ section = self._cfg.section(self._target)
+ cmds = section.getlist("commands")
+ requested = self._cfg.getlist("DEFAULT", "stages")
+ workspaces = section.getlist("workspaces", default=[])
+
+ for ws in self._cfg.getlist("DEFAULT", "ws_disable", default=[]):
+ if ws in workspaces:
+ workspaces.remove(ws)
+
+ logging.info("Only considering workspaces: {}".format(workspaces))
+
+ self._status['stages'] = cmds
+
+ # Deferred for each stage (fired when the stage completes)
+ ds = {k: defer.Deferred() for k in cmds}
+ # Mapping of stage to list of deferred it depends on
+ deps = collections.defaultdict(list)
+ # Mapping of workspace to list of stage names that are run in it
+ byws = collections.defaultdict(list)
+ # All stages that are being run
+ allstages = set()
+
+ def add_stage(stage, ws=None):
+ """
+ Add a stage to the list of stages to run.
+
+ Always adds any dependent stages first.
+
+ """
+ section = self._cfg.section(stage, category="command")
+ if ws is None:
+ ws = section.get("workspace", default=None)
+ if ws not in workspaces:
+ # Although this command could run in a separate workspace,
+ # that workspace is not enabled for this target
+ ws = None
+
+ if stage not in byws[ws]:
+ depds = []
+ for dep in section.getlist("dependencies"):
+ if dep not in cmds:
+ logging.error("Unknown stage {}. Ignoring".format(dep))
+ continue
+ add_stage(dep, ws)
+ depds.append(ds[dep])
+
+ byws[ws].append(stage)
+ deps[stage].extend(depds)
+ allstages.add(stage)
+
+ # Remove any results that might exist, since we're redoing it
+ self._status.get('done_at', {}).pop(stage, None)
+ try:
+ self._status.get('done', []).remove(stage)
+ except ValueError:
+ pass
+
+ lastd = None
+ for cmd in cmds:
+ if lastd is not None:
+ deps[cmd].append(lastd)
+ lastd = ds[cmd]
+ # If the step hasn't been requested, skip it always
+ if requested and cmd not in requested:
+ self._status['skipping'].append(cmd)
+ continue
+ section = self._cfg.section(cmd, category='command')
+ # If the step has previous completed, and results persist, skip it
+ if cmd in self._status.get('done', []):
+ if section.geteval("persist"):
+ continue
+ # In all cases, steps might be readded if they are dependencies of
+ # a step that needs to be completed
+ add_stage(cmd)
+
+ # Now for every stage that isn't being run, trigger the deferred
+ for cmd in cmds:
+ if cmd not in allstages:
+ ds[cmd].callback(None)
+
+ self._stages = {
+ k: _StageSpec(defer.DeferredList(deps[k], fireOnOneErrback=True),
+ ds[k])
+ for k in cmds
+ }
+
+ self._byws = byws
+ self._final_deferred = defer.DeferredList(list(ds.values()),
+ fireOnOneErrback=True)
+
+ @defer.inlineCallbacks
+ def run_cmds(self):
+ for name in self._byws[None]:
+ # First wait for any dependencies to complete (in case they
+ # are running in separate, shared workspaces)
+ logging.debug("Waiting for dependencies of {}".format(name))
+ yield self._deps(name)
+ logging.debug("Starting to run {}".format(name))
+ yield self._run_stage(name)
+ else:
+ logging.debug("Waiting for the end of all stages")
+ yield self._final_deferred
+ self._status['completed_at'] = time.time()
+
+ def _running(self, name):
+ pass
+
+ def _done(self, name):
+ if name not in self._status['done']:
+ self._status['done'].append(name)
+ self._status['done_at'][name] = time.time()
+
+ if name in self._stages:
+ self._stages[name].done.callback(None)
+ del self._stages[name]
+
+ def _fail(self, name, reason):
+ if name not in self._status['failed']:
+ self._status['failed'].append(name)
+ self._status['failed_at'][name] = time.time()
+
+ if name in self._stages:
+ self._stages[name].done.errback(reason)
+ del self._stages[name]
+
+ def _not_doing(self, name):
+ if name in self._stages:
+ self._stages[name].done.errback(AbortedError())
+ del self._stages[name]
+
+ def _deps(self, name):
+ return self._stages[name].deps
+
+ @defer.inlineCallbacks
+ def handover(self):
+ self._logconn = reactor.listenUNIX("\0kermit#{}#logs".format(self._key),
+ _LocalLogfileFactory(self))
+ for ws in self._byws:
+ if ws is not None:
+ yield self._handover(ws)
+
+ @defer.inlineCallbacks
+ def _handover(self, ws):
+ section = self._cfg.section(ws, category="workspace")
+ user = section.get("username", pwd.getpwuid(os.getuid())[0])
+ dir = self._cfg.get(self._target, "dir")
+ logging.debug("Handing over to {} (user: {}): {}"
+ .format(ws, user, self._byws[ws]))
+
+ factory = pb.PBClientFactory()
+ reactor.connectUNIX(_makesockname(os.path.join(dir, user)), factory)
+ root = yield factory.getRootObject()
+ yield root.callRemote("handover", _RemoteResults(self, self._byws[ws]),
+ self._cfgfile, ws, self._byws[ws])
+
+ def begin(self):
+ self._status['begin_at'] = time.time()
+
+ def complete(self):
+ if self._logconn:
+ self._logconn.stopListening()
+ self._logconn = None
+
+ @defer.inlineCallbacks
+ def abort(self):
+ for ws in self._byws:
+ if ws is not None:
+ yield self._abort(ws)
+ if self._active_proc:
+ self._active_proc.signalProcess("TERM")
+ if self._logconn:
+ self._logconn.stopListening()
+ self._logconn = None
+
+ @defer.inlineCallbacks
+ def _abort(self, ws):
+ section = self._cfg.section(ws, category="workspace")
+ user = section.get("username", pwd.getpwuid(os.getuid())[0])
+ dir = self._cfg.get(self._target, "dir")
+ logging.debug("Aborting {} (user: {})"
+ .format(ws, user))
+
+ factory = pb.PBClientFactory()
+ reactor.connectUNIX(_makesockname(os.path.join(dir, user)), factory)
+ root = yield factory.getRootObject()
+ yield root.callRemote("abort", self._cfg.get("DEFAULT", "key"), ws)
+
+ def _send_update(self):
+ """
+ Send an update to the kermit server for the given key.
+
+ """
+ return _send_update(self._qr, self._key, self._status)
+
+ @defer.inlineCallbacks
+ def _save_logs(self, name):
+ logs = self._cfg.getlist(name, "savelogs", default=[],
+ category="command")
+ logging.debug("Saving logs: {}".format(logs))
+ for log in logs:
+ log = log.format(**self._opts)
+ shutil.copy(log, self._cfg.get("DEFAULT", "logdir"))
+ yield None
+
+
+class _SharedOperation(_OperationBase):
+ logger = _RemoteLoggingProcess
+ def __init__(self, qr, cfgfile, cfg, target, section, remote_op, stages):
+ _OperationBase.__init__(self, qr, cfgfile, cfg, target, section, {})
+
+ self._remote_op = remote_op
+ self._stages = stages
+
+ @defer.inlineCallbacks
+ def _save_logs(self, name):
+ logs = self._cfg.getlist(name, "savelogs", default=[],
+ category="command")
+ for log in logs:
+ log = log.format(**self._opts)
+ d = defer.Deferred()
+
+ def _transfer_file(proto):
+ proto.sendLine(os.path.basename(log))
+ fp = basic.FileSender()
+ infile = open(log, "r")
+ transfer = fp.beginFileTransfer(infile, proto.transport)
+ transfer.addBoth(lambda _: infile.close())
+ transfer.chainDeferred(d)
+
+ f = _RemoteLogfileFactory(_transfer_file, d.errback)
+
+ logsockpath = "\0kermit#{}#logs".format(self._opts['key'])
+ logging.debug("Saving log {}".format(log))
+ reactor.connectUNIX(logsockpath, f)
+ yield d
+
+ @defer.inlineCallbacks
+ def run_cmds(self):
+ for name in self._stages:
+ logging.debug("Starting to run {}".format(name))
+ yield self._run_stage(name)
+
+ def _running(self, name):
+ return self._remote_op.callRemote("running", name)
+
+ def _done(self, name):
+ return self._remote_op.callRemote("done", name)
+
+ def _fail(self, name, reason):
+ return self._remote_op.callRemote("fail", name, reason)
+
+ @defer.inlineCallbacks
+ def abort(self):
+ for name in self._stages:
+ yield self._remote_op.callRemote("abort", name)
+ if self._active_proc and self._stages:
+ # Only do this if we are still running stages. If there are no
+ # stages, then we are in cleanup, and should continue doing that.
+ self._active_proc.signalProcess("TERM")
+
+
+class QueueRunner(pb.Root):
+ """
+ Maintains a queue of config files to handle, and processes them one by one.
+
+ The QueueRunner can operate in one of two modes:
+ - as a persistent server, which processes queued up commit requests one
+ at a time.
+ - as a client, handing over any commit requests to an already running
+ server (this mode is implemented via a classmethod).
+
+ Once the server completes its entire queue, it exits.
+
+ The server provides updates to the central kermit (running inside EnDroid)
+ whenever progress is made.
+
+ """
+
+ def __init__(self, dir):
+ self.dir = dir
+ self._sockpath = _makesockname(dir)
+ self._workqueues = collections.defaultdict(collections.OrderedDict)
+ self._aborts = []
+ self._jobs = []
+ self._running = set()
+ self.conn = None
+
+ os.chdir(dir)
+ makedirs("logs")
+ makedirs(".kermit")
+
+ observer = log.PythonLoggingObserver()
+ observer.start()
+
+ logging.basicConfig(filename=os.path.join(".kermit", "qr.log"),
+ level=logging.DEBUG,
+ format="%(asctime)s[%(levelname)-3.3s] %(message)s",
+ datefmt="%Y/%m/%d %H:%M:%S")
+ logging.getLogger().addHandler(logging.StreamHandler())
+
+ def enqueue(self, cfgfile):
+ logging.debug("Enqueuing another commit: {}".format(cfgfile))
+ reactor.callWhenRunning(self._prepare, cfgfile)
+
+ def abort(self, key, ws=None):
+ self._aborts.append((key, ws))
+ reactor.callWhenRunning(self._do_aborts)
+
+ def _consider_shutdown(self):
+ logging.debug("Considering shutdown, with following running: {}"
+ .format(self._running))
+ if not self._running:
+ self.shutdown()
+
+ def shutdown(self):
+ logging.info("Queue Runner shutting down")
+ d = self.conn.stopListening()
+ d.addBoth(lambda _: reactor.stop())
+
+ @defer.inlineCallbacks
+ def _prepare(self, cfgfile):
+ cfg = cfgparser.CfgParser(_CFG_DEFAULTS)
+ cfg.read(cfgfile)
+
+ target = cfg.get("DEFAULT", "target")
+ key = cfg.get("DEFAULT", "key")
+ status = {}
+
+ try:
+ status = yield self._check_checksum(cfg)
+
+ operation = _LocalOperation(self, cfgfile, cfg, target,
+ cfg.section(target), status)
+
+ # Hand over any items that should be run in a shared queue
+ yield operation.handover()
+ yield operation._send_update()
+ except Exception as e:
+ logging.error("Failed to prepare job {}".format(key))
+ logging.exception(e)
+
+ status['check_failed'] = str(e)
+
+ try:
+ yield _send_update(self, key, status)
+ except Exception as f:
+ logging.error("Failed to tell kermit we failed to prepare")
+ logging.exception(f)
+ else:
+ self._workqueues[None][key] = operation
+ finally:
+ reactor.callWhenRunning(self._process_ws_queue, None)
+
    @defer.inlineCallbacks
    def _process_ws_queue(self, ws):
        """
        Process the next queued operation for workspace `ws` (None means
        the primary, per-user workspace).

        Quite a simple function really:
          - Run the pre_cmds (resetting/rebuilding the workspace on
            failure if it's a shared one)
          - Run each of the main commands
          - No matter what, run the post commands
          - Then finally, schedule another run of this function

        If the queue is empty, schedules a shutdown consideration instead.

        """
        if ws in self._running:
            return
        logging.debug("Processing one item from queue: {}"
                      .format(self._workqueues[ws]))
        if not self._workqueues[ws]:
            self._running.discard(ws)
            logging.debug("No work for {}. Will consider shutting down shortly"
                          .format(ws))
            reactor.callLater(60, self._consider_shutdown)
            return
        self._running.add(ws)

        # FIFO: take the oldest entry of the OrderedDict for this workspace.
        key = iter(self._workqueues[ws]).next()
        operation = self._workqueues[ws][key]

        operation.begin()

        try:
            if ws in operation._cfg.getlist("DEFAULT", "ws_reset", default=[]):
                logging.info("Resetting workspace as requested")
                yield operation.abort_workspace(ws)
                yield operation.setup_workspace(ws)

            # Run the "check" commands first to ensure everything is ready
            try:
                logging.debug("Preparing existing workspace {}".format(ws))
                yield operation.run_pre_cmds(suppress_fail=bool(ws),
                                             suppress_done=bool(ws))
            except Exception:
                if ws:
                    # Shared workspace pre_cmd failure: assume the
                    # workspace is stale, reset and rebuild it.
                    logging.debug("Exception occurred; resetting workspace {}"
                                  .format(ws))
                    yield operation.abort_workspace(ws)
                    logging.debug("Trying to setup workspace {}".format(ws))
                    yield operation.setup_workspace(ws)
                else:
                    raise

            # This is the main command loop
            yield operation.run_cmds()
        except Exception as f:
            # We'll already have notified kermit of the failure
            logging.exception(f)
            # But notify guys helping us to not bother anymore, or let the
            # main ws know that the rest of the steps have been aborted.
            try:
                yield operation.abort()
            except Exception as e:
                logging.error("Failed to abort")
                logging.exception(e)
                # But just swallow the error now

        finally:
            # Run the post commands no matter what
            yield operation.run_post_cmds()
            operation.complete()
            self._workqueues[ws].pop(key, None)

            # Make sure to try to process another item from the queue
            self._running.discard(ws)
            reactor.callWhenRunning(self._process_ws_queue, ws)
+
+ def _do_aborts(self):
+ if not self.conn:
+ return
+ logging.debug("Aborting: {}".format(self._aborts))
+ logging.debug("Queues: {}".format(self._workqueues))
+ for key, ws in self._aborts:
+ logging.warning("Aborting {} in {}".format(key, ws))
+ if key in self._workqueues.get(ws, []):
+ self._workqueues[ws][key].abort()
+ del self._workqueues[ws][key]
+ del self._aborts[:]
+
    def _check_checksum(self, cfg):
        """
        Load existing data for a commit (if any) after checking the checksum.

        The checksum for the 'commitdiff' specified in the given configuration
        is checked against any stored checksum from a previous run of the
        same commit.

        This function returns a deferred that will fire with the existing
        status information, or an empty dict if none exists or the checksums
        don't match.

        :raises ValueError: if the configured commitdiff file doesn't exist.

        """
        statedir = os.path.join(self.dir, "logs",
                                cfg.get("DEFAULT", "key"), ".kermit")
        makedirs(statedir)

        # Pull in any checksum/progress left over by a previous run.
        oldchecksum, progress = None, None
        if os.path.exists(os.path.join(statedir, "checksum")):
            with open(os.path.join(statedir, "checksum")) as file:
                oldchecksum = file.read().strip()
        if os.path.exists(os.path.join(statedir, "progress")):
            with open(os.path.join(statedir, "progress")) as file:
                progress = json.loads(file.read())

        if not os.path.exists(cfg.get("DEFAULT", "commitdiff")):
            raise ValueError("Specified commit diff doesn't exist or not "
                             "readable.")

        # Take our own copy of the diff and point the config at it, so the
        # job is immune to the original file changing underneath us.
        difffile = os.path.join(statedir, "commit.diff")
        shutil.copy(cfg.get("DEFAULT", "commitdiff"), difffile)
        cfg.set("DEFAULT", "commitdiff", difffile)

        p = ProcessOutput()

        def handleChecksum(newchecksum):
            newchecksum = newchecksum.strip()
            logging.debug("Checksum calculated as {} (was {}). Progress is {}"
                          .format(newchecksum, oldchecksum, progress))
            if newchecksum == oldchecksum:
                return progress
            else:
                # New or changed diff: reset the stored progress and
                # record the new checksum for next time.
                with open(os.path.join(statedir, "progress"), "w") as file:
                    file.write(json.dumps({}))
                with open(os.path.join(statedir, "checksum"), "w") as file:
                    file.write(newchecksum)
                return {}

        # If md5sum fails for any reason, just behave like a fresh commit.
        p.deferred.addCallbacks(handleChecksum, lambda f: {})
        reactor.spawnProcess(p, "md5sum", ("md5sum", difffile),
                             env=os.environ, path=self.dir)
        return p.deferred
+
+ def start_listening(self):
+ """
+ This (perhaps poorly named) method might change the qr into a client
+ if it fails to set up the server.
+
+ """
+ self.conn = reactor.listenUNIX(self._sockpath, pb.PBServerFactory(self))
+ logging.info("Kermit Queue Runner started")
+ reactor.callLater(60, self._consider_shutdown)
+
+ @staticmethod
+ def send_queues(dir, cfgfile=None, abort=None):
+ reactor.callWhenRunning(QueueRunner._send_queues, dir, cfgfile, abort)
+
+ @staticmethod
+ @defer.inlineCallbacks
+ def _send_queues(dir, cfgfile=None, abort=None):
+ # This means we failed to listen. So send instead.
+ logging.debug("Failed to listen, so sending our queue instead")
+ factory = pb.PBClientFactory()
+ reactor.connectUNIX(_makesockname(dir), factory)
+ root = yield factory.getRootObject()
+ try:
+ if cfgfile:
+ yield root.callRemote("enqueue", cfgfile)
+ if abort:
+ yield root.callRemote("abort", abort)
+ except Exception as e:
+ logging.debug("Total failure")
+ logging.debug(str(fail))
+ reactor.stop()
+
+ def remote_enqueue(self, cfgfile):
+ """Callback for enqueuing a regular commit (from a peer process)."""
+ self.enqueue(cfgfile)
+
+ def remote_abort(self, key, ws=None):
+ logging.debug("Being told to abort {} in {}".format(key, ws))
+ # Both local and remote workspaces can get this call!
+ self.abort(key, ws)
+
+ def remote_handover(self, remote_op, cfgfile, ws, stages):
+ logging.debug("Been handed something to do: {} in {} (stages: {})"
+ .format(cfgfile, ws, stages))
+
+ cfg = cfgparser.CfgParser(_CFG_DEFAULTS)
+ cfg.read(cfgfile)
+
+ target = cfg.get("DEFAULT", "target")
+ key = cfg.get("DEFAULT", "key")
+
+ operation = _SharedOperation(self, cfgfile, cfg, target,
+ cfg.section(ws, category="workspace"),
+ remote_op, stages)
+
+ self._workqueues[ws][key] = operation
+ reactor.callWhenRunning(self._process_ws_queue, ws)
+
+
class ProcessOutput(protocol.ProcessProtocol):
    """
    A nice simple ProcessProtocol that returns the stdout of the process.

    Callers should use the .deferred field to receive the result: it fires
    with the accumulated stdout when the process exits 0, or errbacks with
    the failure reason otherwise.

    """
    def __init__(self):
        self.deferred = defer.Deferred()
        self.output = ""
    def outReceived(self, data):
        self.output += data
    def processEnded(self, reason):
        # Clear before firing so the deferred cannot be fired twice.
        self.deferred, d = None, self.deferred
        if reason.value.exitCode == 0:
            d.callback(self.output)
        else:
            d.errback(reason)
+
+
def main():
    """Entry point: run as queue server, or forward work to an existing one."""
    parser = ArgumentParser()
    parser.add_argument("-a", "--abort")
    parser.add_argument("userdir")
    parser.add_argument("cfgfile", nargs='?')

    args = parser.parse_args()

    qr = QueueRunner(args.userdir)
    try:
        qr.start_listening()
    except error.CannotListenError:
        # Another queue runner already owns the socket: act as a client
        # and forward our work to it instead.
        QueueRunner.send_queues(args.userdir, args.cfgfile, args.abort)
    else:
        if args.cfgfile:
            qr.enqueue(args.cfgfile)
        if args.abort:
            qr.abort(args.abort)

    reactor.run()
+
+if __name__ == "__main__":
+ main()
=== added file 'bin/kermit_trigger'
--- bin/kermit_trigger 1970-01-01 00:00:00 +0000
+++ bin/kermit_trigger 2014-05-09 09:53:50 +0000
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+import os
+import sys
+import pwd
+import StringIO
+import argparse
+import subprocess
+import ConfigParser
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-w", "--workspace", action="store_true")
+ parser.add_argument("-a", "--abort")
+
+ args = parser.parse_args()
+
+ # Read the config file in from stdin
+ config = sys.stdin.read()
+
+ cfg = ConfigParser.SafeConfigParser()
+ cfg.readfp(StringIO.StringIO(config))
+
+ target = cfg.get("DEFAULT", "target")
+
+ basedir = cfg.get(target, "dir")
+ userdir = os.path.join(basedir, pwd.getpwuid(os.getuid())[0])
+ if not os.path.exists(userdir):
+ os.makedirs(userdir)
+
+ cmd = ["screen", "-c", "/dev/null", "-d", "-m",
+ "kermit_queue_runner", userdir]
+
+ key = cfg.get("DEFAULT", "key")
+ cfg.set("DEFAULT", "logdir", os.path.join(userdir, 'logs', key))
+
+ filename = os.path.join(basedir, ".kermit", key + ".cfg")
+ if not args.workspace and not args.abort:
+ if not os.path.exists(os.path.dirname(filename)):
+ os.makedirs(os.path.dirname(filename))
+
+ with open(filename, "w") as file:
+ cfg.write(file)
+
+ cmd.append(filename)
+
+ if args.abort:
+ cmd.extend(("-a", args.abort))
+
+ st = subprocess.call(cmd)
+ sys.exit(st)
+
+if __name__ == "__main__":
+ main()
=== modified file 'debian/control'
--- debian/control 2014-04-05 23:25:54 +0000
+++ debian/control 2014-05-09 09:53:50 +0000
@@ -12,3 +12,17 @@
Description: XMPP bot framework
EnDroid is a framework for building bots in XMPP. Its architecture
is based around plugins, in order to make it as extensibile as possible.
+
+Package: endroid-kermit
+Architecture: all
+Depends: python,python-twisted,endroid,${misc:Depends}
+Suggests: endroid-kermit-daemons
+Description: Continuous Integration commit queue for EnDroid
+ Kermit is a CI commit queue handling plugin for EnDroid.
+
+Package: endroid-kermit-daemons
+Architecture: all
+Depends: python,python-twisted,screen,${misc:Depends}
+Description: Daemon processes for the Kermit EnDroid plugin
+ This package must be installed on any servers that will be used as Kermit
+ commit hosts.
=== added file 'debian/endroid-kermit-daemons.install'
--- debian/endroid-kermit-daemons.install 1970-01-01 00:00:00 +0000
+++ debian/endroid-kermit-daemons.install 2014-05-09 09:53:50 +0000
@@ -0,0 +1,3 @@
+bin/kermit usr/bin/
+bin/kermit_trigger usr/bin/
+bin/kermit_queue_runner usr/bin/
=== added file 'debian/endroid-kermit.install'
--- debian/endroid-kermit.install 1970-01-01 00:00:00 +0000
+++ debian/endroid-kermit.install 2014-05-09 09:53:50 +0000
@@ -0,0 +1,19 @@
+resources/kermit/profile.html usr/share/endroid/templates/kermit/
+resources/kermit/queue.html usr/share/endroid/templates/kermit/
+resources/kermit/response.json usr/share/endroid/templates/kermit/
+resources/kermit/alert.js usr/share/endroid/media/kermit/
+resources/kermit/queue.js usr/share/endroid/media/kermit/
+resources/kermit/profile.js usr/share/endroid/media/kermit/
+resources/kermit/services.js usr/share/endroid/media/kermit/
+resources/kermit/alert.css usr/share/endroid/media/kermit/
+resources/kermit/queue.css usr/share/endroid/media/kermit/
+resources/kermit/profile.css usr/share/endroid/media/kermit/
+resources/kermit/kermit.png usr/share/endroid/media/kermit/
+resources/kermit/kermit-logo.jpg usr/share/endroid/media/kermit/
+resources/kermit/kermit.css usr/share/endroid/media/kermit/
+resources/kermit/kermit-icon-120.png usr/share/endroid/media/kermit/
+resources/kermit/kermit-icon-152.png usr/share/endroid/media/kermit/
+resources/kermit/kermit-icon-60.png usr/share/endroid/media/kermit/
+resources/kermit/kermit-icon-76.png usr/share/endroid/media/kermit/
+resources/kermit/kermit-icon.png usr/share/endroid/media/kermit/
+resources/kermit/kermit_splash.png usr/share/endroid/media/kermit/
=== added file 'debian/endroid-kermit.pyinstall'
--- debian/endroid-kermit.pyinstall 1970-01-01 00:00:00 +0000
+++ debian/endroid-kermit.pyinstall 2014-05-09 09:53:50 +0000
@@ -0,0 +1,1 @@
+src/endroid/plugins/kermit/*py endroid.plugins.kermit
=== modified file 'debian/endroid.install'
--- debian/endroid.install 2014-04-05 19:53:27 +0000
+++ debian/endroid.install 2014-05-09 09:53:50 +0000
@@ -6,6 +6,6 @@
bin/spelunk_hi5s usr/sbin/
lib/wokkel-0.7.1-py2.7.egg usr/lib/endroid/dependencies
var/endroid.db var/lib/endroid/db
-doc/EnDroid.png usr/share/endroid/media/
+resources/httpinterface/EnDroid.png usr/share/endroid/media/httpinterface/
resources/httpinterface/index.html usr/share/endroid/templates/httpinterface/
resources/httpinterface/notfound.html usr/share/endroid/templates/httpinterface/
=== added file 'debian/endroid.pyinstall'
--- debian/endroid.pyinstall 1970-01-01 00:00:00 +0000
+++ debian/endroid.pyinstall 2014-05-09 09:53:50 +0000
@@ -0,0 +1,3 @@
+src/endroid/*py endroid
+src/endroid/plugins/*py endroid.plugins
+src/endroid/plugins/compute/*py endroid.plugins.compute
=== added file 'etc/kermit.cfg'
--- etc/kermit.cfg 1970-01-01 00:00:00 +0000
+++ etc/kermit.cfg 2014-05-09 09:53:50 +0000
@@ -0,0 +1,157 @@
+# Configuration file for Kermit
+
+# Section names:
+#
+# - A naked section name describes a "target". This is something that the user
+# types in, and provides the starting point for what configuration applies
+# to a given commit.
+# - "workspace:" category sections describe auxiliary workspaces that may
+# be used to perform commits. Note that these are in addition to the
+# commit workspace that is created specifically for any given commit.
+# - "command:" category sections describe the commands that can be run to
+# perform a commit.
+#
+# 'target' sections will reference 'workspace' and 'command' sections; and
+# 'workspace' sections can also reference 'command' sections; and 'command'
+# sections can also reference other 'command' sections.
+#
+# Within 'target' sections, the following items have meaning:
+#
+# - "dir" is the directory that all operations for this target are performed
+# in. Note that multiple targets can share the same directory, in that the
+# same queues simply get used per person. Each workspace (including the
+# per-commit one) are rooted in this directory, so ensure commands won't
+# conflict with other commands run in parallel.
+# - "workspaces" is a list of the auxiliary workspaces that may be used to
+# perform commits to this target. If a given command requests a workspace
+# that is not listed for that target, then the command is simply run in
+# the commit workspace instead.
+# - "pre_cmds" and "post_cmds" are the lists of commands to run in the commit
+# workspace before/after the commit commands themselves.
+# - "commands" is the list of commands that need to be run (and pass) to
+# perform the actual commit. Each command implicitly depends upon the
+# completion of the command prior to it, and won't run until that command
+# has passed (though not necessarily within the same run. See the
+# "dependencies" option under the "command:" category, below).
+# - "server" specifies the server on which the commands will be run. Kermit
+# will try to ssh to this server as each user, using keys uploaded via the
+# key interface.
+# - "fingerprints" specifies a list (although one is usually sufficient) of
+# ssh fingerprints for the destination server. This can be set to the special
+# string 'ignore' if fingerprint checking should be disabled - though this
+# is not recommended.
+#
+# Within 'workspace' sections, the following items have meaning:
+#
+# - "username" is the name of the user that owns the workspace. This is used
+# to communicate with the right kermit process. Note that if left blank, the
+# workspace is assumed to be owned by the user owning the commit.
+# - "setup_cmds" and "abort_cmds" are lists of commands used to prepare a
+# shared workspace, or tear it down respectively. These commands are only
+# run when necessary, which is to say when the "pre_cmds" for the workspace
+# fail. The intention is to run expensive commands here, so that commits
+# making use of these workspaces can complete faster. Note that if "pre_cmds"
+# fails, forcing a rerun of the "setup_cmds", then the "pre_cmds" are not
+# run as well (i.e. "setup_cmds" is expected to perform the actions of
+# "pre_cmds" if they are necessary).
+# - "pre_cmds" and "post_cmds" are lists of commands to run before/after any
+# commit commands that have been offloaded to this workspace. "pre_cmds"
+# should include any items that are needed to prepare the workspace, and
+# make sure it is still valid (e.g. up to date). If any of them fail, it
+# will cause the workspaces to be torn down and set up from scratch.
+# "post_cmds" will be run no matter what happens (completion of all steps,
+# failure of a step, or abort of the whole commit), and must leave the
+# workspace ready for the next commit.
+#
+# Within 'command' sections, the following items have meaning:
+#
+# - "command" is the command line to run. This is parsed using shell parsing
+# rules, so quotes can be used to join space-separated words into single
+# tokens.
+# - "pwd" is the working directory to run the command in, relative to the
+# base directory of the target. Note that the working directory may not
+# be higher than the base directory of the target (with the default being
+# "./", i.e. the target directory).
+# - "dependencies" is a list of other commands that must always be run in the
+# workspace prior to this command. These commands will be run, even if they
+# have previously succeeded for the patch in question and normally would
+# persist their results.
+# - "persist" is a boolean indicating whether the results of this command can
+# be kept from run to run (assuming the checksum of the patch to commit
+# does not change between runs). Default is no.
+# - "attempts" is the number of times to attempt the command. This can be
+# used for commands that are known to be flaky, or fail intermittently.
+# Default is 1.
+# - "attempt_interval" is the number of seconds to wait between retries of
+# this command. Default is 10.
+# - "workspace" is the name of the shared workspace to offload this command
+# to. This will only happen if the target lists this workspace as a valid
+# one in its "workspaces" configuration option.
+# - "environment" is a list of "key: value" pairs that are set in the
+# environment of the command when spawned. Default is to inherit the env
+# of the kermit process. Note that these variables are added (or
+# overwrite) the starting environment, and do not replace it.
+# - "filevars" is a list of "varname: filename" pairs that describe
+# interpolation variables (see below) whose values should be the contents
+# of the given files. See the section on value interpolation below for the
+# full semantics of this option.
+# - "savelogs" is a list of files that should be saved into the log directory.
+# Usually these are output files generated in some temporary directory, and
+# that will be either useful to the user, or required in a later step. The
+# file name in the logdir is the basename of the input file.
+#
+# Certain options in the "command:" sections can contain interpolation
+# variables (using Python's .format and attr-based lookups; i.e. {varname}).
+# The values available are looked up in order from the following:
+#
+# - Any "filevars" defined in the section.
+# - Any options defined in the "command:" section.
+# - Any options defined in the corresponding "workspace:" section. This only
+# applies to operations running in a shared workspace.
+# - Any options defined in the corresponding target section.
+# - Various global parameters (see below).
+#
+# Note that options here are not restricted to options understood by kermit.
+# Any options may be specified in the config file, and they will be used in
+# interpolation, allowing re-usable command snippets to be created, and
+# tweaked as needed for each target.
+#
+# The following values are interpolated:
+# - The filename portion of any "filevars". Note that, obviously, the
+# interpolation here only includes variables already loaded. Variables are
+# set in the order defined, so earlier file contents can be used in the names
+# of later filevars, if required.
+# - The values of any environment variables.
+# - The command line (after lexical parsing of the command line into separate
+# tokens. i.e. any interpolated variable can only be a single token).
+# - The working directory for the command ("pwd").
+# - The paths to any files in the "savelogs" directive.
+#
+# The following global configuration options are always present and usable
+# (though they can be overridden in configuration sections if desired):
+#
+# - "logdir": the directory into which logs should be/are written.
+# - "target": the current target.
+# - "key": the unique identifier for this kermit operation.
+# - "commitdiff": path to the file being committed.
+# - "buglist": the list of bug IDs being committed against.
+# - "commitmsg": the (optional) commit message specified by the user.
+# - "username": the username of the user the commit is for (note: this is not
+# necessarily the user the command is running as, i.e. in the case of a
+# shared workspace running as a separate user being used to run part of the
+# commit).
+# - "stages": list of the stages requested by the user (note: this may not
+# be exactly the stages being run, since the user may not have correctly
+# taken dependencies into account).
+#
+# There are also some cosmetic-only configuration options, used to customise
+# the web interface:
+#
+# - "message" is configurable under the 'target' section, is interpolated (with
+# options from the target and global sections only), and is displayed within
+# the commit panel on the web interface. An example might be to let the user
+# know where log files can be accessed.
+# - "description" is configured under the 'command' sections, and replaces the
+# short command name wherever the command is mentioned in the web interface.
+# - "recovery" is also configured under the 'command' sections, and is shown
+# in an alert within the command panel whenever the command fails.
=== renamed file 'doc/EnDroid.png' => 'resources/httpinterface/EnDroid.png'
=== modified file 'resources/httpinterface/index.html'
--- resources/httpinterface/index.html 2014-04-05 11:42:51 +0000
+++ resources/httpinterface/index.html 2014-05-09 09:53:50 +0000
@@ -5,7 +5,7 @@
+
+
+
+ No keys currently configured.
+
+
+
+
+ Place one of the public keys listed below into the
+ .ssh/authorized_keys file on each server that you may
+ use for commits.
+
+
+ {% raw %}
+