Merge ~afreiberger/charm-grafana:add_tests_and_linting into charm-grafana:master
- Git
- lp:~afreiberger/charm-grafana
- add_tests_and_linting
- Merge into master
Status: | Superseded |
---|---|
Proposed branch: | ~afreiberger/charm-grafana:add_tests_and_linting |
Merge into: | charm-grafana:master |
Diff against target: |
879 lines (+583/-53) 16 files modified
.gitignore (+22/-0) Makefile (+49/-0) lib/charms/layer/grafana.py (+2/-3) reactive/grafana.py (+94/-50) requirements.txt (+1/-0) tests/functional/conftest.py (+67/-0) tests/functional/juju_tools.py (+68/-0) tests/functional/requirements.txt (+6/-0) tests/functional/test_deploy.py (+103/-0) tests/unit/conftest.py (+69/-0) tests/unit/example.cfg (+1/-0) tests/unit/requirements.txt (+5/-0) tests/unit/test_actions.py (+12/-0) tests/unit/test_lib.py (+12/-0) tox.ini (+71/-0) wheelhouse.txt (+1/-0) |
Related bugs: |
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Canonical IS Reviewers | Pending | ||
Llama (LMA) Charmers | Pending | ||
Review via email: mp+377471@code.launchpad.net |
This proposal has been superseded by a proposal from 2020-01-11.
Commit message
Description of the change
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote : | # |
Unable to determine commit message from repository - please click "Set commit message" and enter the commit message manually.
Unmerged commits
- d3370e1... by Drew Freiberger
-
Added testing and resolved lint errors
WIP
- f2a62d8... by Drew Freiberger
-
Check dashboards before uploading new revisions
It was found that dashboards were being uploaded and creating unbounded
revision history once every 5 minutes during update-status causing the
grafana.db configuration database to balloon. To eliminate this, we
now validate that the rendered dashboard template result is not already
the version available in the grafana database before uploading. Some refactoring of the dashboard function has been made to resolve
complexity warnings. Closes-Bug: 1858490
Preview Diff
1 | diff --git a/.gitignore b/.gitignore |
2 | new file mode 100644 |
3 | index 0000000..32e2995 |
4 | --- /dev/null |
5 | +++ b/.gitignore |
6 | @@ -0,0 +1,22 @@ |
7 | +# Byte-compiled / optimized / DLL files |
8 | +__pycache__/ |
9 | +*.py[cod] |
10 | +*$py.class |
11 | + |
12 | +# Log files |
13 | +*.log |
14 | + |
15 | +.tox/ |
16 | +.coverage |
17 | + |
18 | +# vi |
19 | +.*.swp |
20 | + |
21 | +# pycharm |
22 | +.idea/ |
23 | + |
24 | +# version data |
25 | +repo-info |
26 | + |
27 | +# reports |
28 | +report/* |
29 | diff --git a/Makefile b/Makefile |
30 | new file mode 100644 |
31 | index 0000000..b357248 |
32 | --- /dev/null |
33 | +++ b/Makefile |
34 | @@ -0,0 +1,49 @@ |
35 | +help: |
36 | + @echo "This project supports the following targets" |
37 | + @echo "" |
38 | + @echo " make help - show this text" |
39 | + @echo " make submodules - make sure that the submodules are up-to-date" |
40 | + @echo " make lint - run flake8" |
41 | + @echo " make test - run the unittests and lint" |
42 | + @echo " make unittest - run the tests defined in the unittest subdirectory" |
43 | + @echo " make functional - run the tests defined in the functional subdirectory" |
44 | + @echo " make release - build the charm" |
45 | + @echo " make clean - remove unneeded files" |
46 | + @echo "" |
47 | + |
48 | +submodules: |
49 | + @echo "Cloning submodules" |
50 | + @git submodule update --init --recursive |
51 | + |
52 | +lint: |
53 | + @echo "Running flake8" |
54 | + @tox -e lint |
55 | + |
56 | +test: lint unittest functional |
57 | + |
58 | +unittest: |
59 | + @tox -e unit |
60 | + |
61 | +functional: build |
62 | + @PYTEST_KEEP_MODEL=$(PYTEST_KEEP_MODEL) \ |
63 | + PYTEST_CLOUD_NAME=$(PYTEST_CLOUD_NAME) \ |
64 | + PYTEST_CLOUD_REGION=$(PYTEST_CLOUD_REGION) \ |
65 | + tox -e functional |
66 | + |
67 | +build: |
68 | + @echo "Building charm to base directory $(JUJU_REPOSITORY)" |
69 | + @-git describe --tags > ./repo-info |
70 | + @CHARM_LAYERS_DIR=./layers CHARM_INTERFACES_DIR=./interfaces TERM=linux \ |
71 | + JUJU_REPOSITORY=$(JUJU_REPOSITORY) charm build . --force |
72 | + |
73 | +release: clean build |
74 | + @echo "Charm is built at $(JUJU_REPOSITORY)/builds" |
75 | + |
76 | +clean: |
77 | + @echo "Cleaning files" |
78 | + @if [ -d .tox ] ; then rm -r .tox ; fi |
79 | + @if [ -d .pytest_cache ] ; then rm -r .pytest_cache ; fi |
80 | + @find . -iname __pycache__ -exec rm -r {} + |
81 | + |
82 | +# The targets below don't depend on a file |
83 | +.PHONY: lint test unittest functional build release clean help submodules |
84 | diff --git a/lib/charms/layer/grafana.py b/lib/charms/layer/grafana.py |
85 | index b482203..67b53ea 100644 |
86 | --- a/lib/charms/layer/grafana.py |
87 | +++ b/lib/charms/layer/grafana.py |
88 | @@ -2,13 +2,12 @@ |
89 | |
90 | import json |
91 | import requests |
92 | +from charmhelpers.core import unitdata |
93 | from charmhelpers.core.hookenv import ( |
94 | config, |
95 | log, |
96 | ) |
97 | |
98 | -from charmhelpers.core import unitdata |
99 | - |
100 | |
101 | def get_admin_password(): |
102 | kv = unitdata.kv() |
103 | @@ -30,7 +29,7 @@ def import_dashboard(dashboard, name=None): |
104 | name = dashboard['dashboard'].get('title') or 'Untitled' |
105 | headers = {'Content-Type': 'application/json'} |
106 | import_url = 'http://localhost:{}/api/dashboards/db'.format( |
107 | - config('port')) |
108 | + config('port')) |
109 | passwd = get_admin_password() |
110 | if passwd is None: |
111 | return (False, 'Unable to retrieve grafana password.') |
112 | diff --git a/reactive/grafana.py b/reactive/grafana.py |
113 | index e1dc444..3f24f0a 100644 |
114 | --- a/reactive/grafana.py |
115 | +++ b/reactive/grafana.py |
116 | @@ -4,12 +4,11 @@ import glob |
117 | import json |
118 | import os |
119 | import re |
120 | -import requests |
121 | import shutil |
122 | -import six |
123 | import subprocess |
124 | import time |
125 | |
126 | +from charmhelpers import fetch |
127 | from charmhelpers.contrib.charmsupport import nrpe |
128 | from charmhelpers.core import ( |
129 | hookenv, |
130 | @@ -17,11 +16,9 @@ from charmhelpers.core import ( |
131 | unitdata, |
132 | ) |
133 | from charmhelpers.core.templating import render |
134 | -from charmhelpers import fetch |
135 | -from charms.reactive.helpers import ( |
136 | - any_file_changed, |
137 | - is_state, |
138 | -) |
139 | + |
140 | +from charms.layer import snap |
141 | +from charms.layer.grafana import import_dashboard |
142 | from charms.reactive import ( |
143 | hook, |
144 | remove_state, |
145 | @@ -29,11 +26,20 @@ from charms.reactive import ( |
146 | when, |
147 | when_not, |
148 | ) |
149 | +from charms.reactive.helpers import ( |
150 | + any_file_changed, |
151 | + is_state, |
152 | +) |
153 | |
154 | -from charms.layer import snap |
155 | -from charms.layer.grafana import import_dashboard |
156 | from jinja2 import Environment, FileSystemLoader, exceptions |
157 | |
158 | +from jsondiff import diff |
159 | + |
160 | +import requests |
161 | + |
162 | +import six |
163 | + |
164 | + |
165 | SVCNAME = {'snap': 'snap.grafana.grafana', |
166 | 'apt': 'grafana-server'} |
167 | SNAP_NAME = 'grafana' |
168 | @@ -119,8 +125,7 @@ def install_packages(): |
169 | set_state('grafana.installed') |
170 | hookenv.status_set('active', 'Completed installing grafana') |
171 | elif source == 'snap' and \ |
172 | - (host.lsb_release()['DISTRIB_CODENAME'] >= 'xenial' or |
173 | - host.lsb_release()['DISTRIB_CODENAME'] < 'p'): |
174 | + (host.lsb_release()['DISTRIB_CODENAME'] >= 'xenial' or host.lsb_release()['DISTRIB_CODENAME'] < 'p'): |
175 | # NOTE(aluria): precise is the last supported Ubuntu release, so |
176 | # anything below 'p' is actually newer than xenial (systemd support) |
177 | snap.install(SNAP_NAME, channel=channel, force_dangerous=False) |
178 | @@ -432,8 +437,9 @@ def configure_website(website): |
179 | |
180 | |
181 | def validate_datasources(): |
182 | - """TODO: make sure datasources option is merged with |
183 | - relation data |
184 | + """Verify that datasources configuration is valid, if existing. |
185 | + |
186 | + TODO: make sure datasources option is merged with relation data |
187 | TODO: make sure datasources are validated |
188 | """ |
189 | config = hookenv.config() |
190 | @@ -447,7 +453,8 @@ def validate_datasources(): |
191 | |
192 | |
193 | def check_datasource(ds): |
194 | - """ |
195 | + """Check for and add datasources not currently in grafana DB. |
196 | + |
197 | CREATE TABLE `data_source` ( |
198 | `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL |
199 | , `org_id` INTEGER NOT NULL |
200 | @@ -469,7 +476,6 @@ def check_datasource(ds): |
201 | , `with_credentials` INTEGER NOT NULL DEFAULT 0); |
202 | INSERT INTO "data_source" VALUES(1,1,0,'prometheus','BootStack Prometheus','proxy','http://localhost:9090','','','',0,'','',1,'{}','2016-01-22 12:11:06','2016-01-22 12:11:11',0); |
203 | """ # noqa E501 |
204 | - |
205 | # ds will be similar to: |
206 | # {'service_name': 'prometheus', |
207 | # 'url': 'http://10.0.3.216:9090', |
208 | @@ -504,9 +510,8 @@ def check_datasource(ds): |
209 | |
210 | # This isn't exposed in charmhelpers: https://github.com/juju/charm-helpers/issues/367 |
211 | def render_custom(source, context, **parameters): |
212 | - """ |
213 | - Renders a template from the template folder with custom environment |
214 | - parameters. |
215 | + """Render a template from the template folder with custom environment parameters. |
216 | + |
217 | source: template file name to render from |
218 | context: template context variables |
219 | parameters: initialization parameters for the jinja Environment |
220 | @@ -523,6 +528,69 @@ def render_custom(source, context, **parameters): |
221 | return template.render(context) |
222 | |
223 | |
224 | +def get_current_dashboards(port, passwd): |
225 | + # Get current dashboards |
226 | + get_req = 'http://127.0.0.1:{}/api/search?type=dash-db'.format(port) |
227 | + response = requests.get(get_req, auth=('admin', passwd)) |
228 | + if response.status_code == 200: |
229 | + return response.json() |
230 | + return [] |
231 | + |
232 | + |
233 | +def get_current_dashboard_json(uid, port, passwd): |
234 | + if uid is not None: |
235 | + response = requests.get("http://127.0.0.1:{}/api/dashboards/uid/{}".format(port, uid), auth=('admin', passwd)) |
236 | + if response.status_code == 200: |
237 | + try: |
238 | + return json.loads(response.text)["dashboard"] |
239 | + except json.decoder.JSONDecodeError: |
240 | + pass |
241 | + return json.loads('{"version": 0}') |
242 | + |
243 | + |
244 | +def check_and_add_dashboard(filename, context, prom_metrics, dash_to_uid, port, gf_adminpasswd): |
245 | + dashboard_str = render_custom(source=filename, |
246 | + context=context, |
247 | + variable_start_string="<<", |
248 | + variable_end_string=">>", |
249 | + ) |
250 | + hookenv.log("Checking Dashboard Template: {}".format(filename)) |
251 | + expr = str(re.findall('"expr":(.*),', dashboard_str)) |
252 | + metrics = set(re.findall('[a-zA-Z0-9]*_[a-zA-Z0-9_]*', expr)) |
253 | + if not metrics: |
254 | + hookenv.log("Skipping Dashboard Template: {} no metrics in template" |
255 | + " {}".format(filename, metrics)) |
256 | + return |
257 | + |
258 | + missing_metrics = set([x for x in metrics if x not in prom_metrics]) |
259 | + if missing_metrics: |
260 | + hookenv.log("Skipping Dashboard Template: {} missing {} metrics." |
261 | + "Missing: {}".format(filename, len(missing_metrics), |
262 | + ', '.join(missing_metrics) |
263 | + ), hookenv.DEBUG) |
264 | + return |
265 | + |
266 | + dashboard_json = json.loads(dashboard_str) |
267 | + # before uploading the dashboard, we should check that it doesn't already exist with same data lp#1858490 |
268 | + new = dashboard_json["dashboard"] |
269 | + curr = get_current_dashboard_json(dash_to_uid.get(dashboard_json["dashboard"]["title"], None), port, gf_adminpasswd) |
270 | + # must remove the versions as they will likely be different |
271 | + del new["version"] |
272 | + del curr["version"] |
273 | + json_diff_result = diff(new, curr) |
274 | + if json_diff_result: |
275 | + hookenv.log("Skipping Dashboard Template: already up to date: {}".format(filename)) |
276 | + return |
277 | + |
278 | + hookenv.log("Using Dashboard Template: {}".format(filename)) |
279 | + post_req = 'http://127.0.0.1:{}/api/dashboards/db'.format(port) |
280 | + r = requests.post(post_req, json=dashboard_json, auth=('admin', gf_adminpasswd)) |
281 | + |
282 | + if r.status_code != 200: |
283 | + hookenv.log("Posting template {} failed with error:" |
284 | + " {}".format(filename, r.text), 'ERROR') |
285 | + |
286 | + |
287 | def generate_prometheus_dashboards(gf_adminpasswd, ds): |
288 | # prometheus_host = ds |
289 | ds_name = '{} - {}'.format(ds['service_name'], ds['description']) |
290 | @@ -534,9 +602,11 @@ def generate_prometheus_dashboards(gf_adminpasswd, ds): |
291 | ' {}'.format(response.status_code), 'ERROR') |
292 | return |
293 | |
294 | + current_dashboards = get_current_dashboards(config["port"], gf_adminpasswd) |
295 | + dash_to_uid = {dash['title']: dash['uid'] for dash in current_dashboards if 'title' in dash and 'uid' in dash} |
296 | + |
297 | prom_metrics = response.json()['data'] |
298 | templates_dir = 'templates/dashboards/prometheus' |
299 | - post_req = 'http://127.0.0.1:{}/api/dashboards/db'.format(config['port']) |
300 | context = {'datasource': ds_name, |
301 | 'external_network': config['external_network'], |
302 | 'bcache_enabled': "bcache_cache_hit_ratio" in prom_metrics, |
303 | @@ -551,33 +621,9 @@ def generate_prometheus_dashboards(gf_adminpasswd, ds): |
304 | 'ip_status', 'neutron_net', config['external_network'], |
305 | 'neutron_public_ip_usage'] |
306 | prom_metrics.extend(ignore_metrics) |
307 | - for filename in os.listdir(templates_dir): |
308 | - dashboard_str = render_custom(source=filename, |
309 | - context=context, |
310 | - variable_start_string="<<", |
311 | - variable_end_string=">>", |
312 | - ) |
313 | - hookenv.log("Checking Dashboard Template: {}".format(filename)) |
314 | - expr = str(re.findall('"expr":(.*),', dashboard_str)) |
315 | - metrics = set(re.findall('[a-zA-Z0-9]*_[a-zA-Z0-9_]*', expr)) |
316 | - if not metrics: |
317 | - hookenv.log("Skipping Dashboard Template: {} no metrics in template" |
318 | - " {}".format(filename, metrics)) |
319 | - continue |
320 | - missing_metrics = set([x for x in metrics if x not in prom_metrics]) |
321 | - if missing_metrics: |
322 | - hookenv.log("Skipping Dashboard Template: {} missing {} metrics." |
323 | - "Missing: {}".format(filename, len(missing_metrics), |
324 | - ', '.join(missing_metrics) |
325 | - ), hookenv.DEBUG) |
326 | - else: |
327 | - hookenv.log("Using Dashboard Template: {}".format(filename)) |
328 | - dashboard_json = json.loads(dashboard_str) |
329 | - r = requests.post(post_req, json=dashboard_json, auth=('admin', gf_adminpasswd)) |
330 | |
331 | - if r.status_code != 200: |
332 | - hookenv.log("Posting template {} failed with error:" |
333 | - " {}".format(filename, r.text), 'ERROR') |
334 | + for filename in os.listdir(templates_dir): |
335 | + check_and_add_dashboard(filename, context, prom_metrics, dash_to_uid, config["port"], gf_adminpasswd) |
336 | |
337 | |
338 | def generate_query(ds, is_default, id=None): |
339 | @@ -661,7 +707,8 @@ def generate_query(ds, is_default, id=None): |
340 | @when('grafana.started') |
341 | @when_not('grafana.admin_password.set') |
342 | def check_adminuser(): |
343 | - """ |
344 | + """Create Adminuser if not existing. |
345 | + |
346 | CREATE TABLE `user` ( |
347 | `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL |
348 | , `version` INTEGER NOT NULL |
349 | @@ -681,7 +728,6 @@ def check_adminuser(): |
350 | ); |
351 | INSERT INTO "user" VALUES(1,0,'admin','root+bootstack-ps45@canonical.com','BootStack Team','309bc4e78bc60d02dc0371d9e9fa6bf9a809d5dc25c745b9e3f85c3ed49c6feccd4ffc96d1db922f4297663a209e93f7f2b6','LZeJ3nSdrC','hseJcLcnPN','',1,1,0,'light','2016-01-22 12:00:08','2016-01-22 12:02:13'); |
352 | """ # noqa E501 |
353 | - |
354 | # XXX: If you add any dependencies on config items here, |
355 | # be sure to update config_changed() accordingly! |
356 | |
357 | @@ -717,9 +763,7 @@ def check_adminuser(): |
358 | query = cur.execute('SELECT id, login, salt FROM user') |
359 | for row in query.fetchall(): |
360 | if row[1] == 'admin': |
361 | - nagios_context = config.get('nagios_context', False) |
362 | - if not nagios_context: |
363 | - nagios_context = 'UNKNOWN' |
364 | + nagios_context = config.get('nagios_context', 'UNKNOWN') |
365 | email = 'root+%s@canonical.com' % nagios_context |
366 | hpasswd = hpwgen(passwd, row[2]) |
367 | if hpasswd: |
368 | diff --git a/requirements.txt b/requirements.txt |
369 | new file mode 100644 |
370 | index 0000000..8462291 |
371 | --- /dev/null |
372 | +++ b/requirements.txt |
373 | @@ -0,0 +1 @@ |
374 | +# Include python requirements here |
375 | diff --git a/tests/functional/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc b/tests/functional/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc |
376 | new file mode 100644 |
377 | index 0000000..54c4d14 |
378 | Binary files /dev/null and b/tests/functional/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc differ |
379 | diff --git a/tests/functional/__pycache__/juju_tools.cpython-37.pyc b/tests/functional/__pycache__/juju_tools.cpython-37.pyc |
380 | new file mode 100644 |
381 | index 0000000..ce8ffdc |
382 | Binary files /dev/null and b/tests/functional/__pycache__/juju_tools.cpython-37.pyc differ |
383 | diff --git a/tests/functional/__pycache__/test_deploy.cpython-37-pytest-5.3.2.pyc b/tests/functional/__pycache__/test_deploy.cpython-37-pytest-5.3.2.pyc |
384 | new file mode 100644 |
385 | index 0000000..27b64d7 |
386 | Binary files /dev/null and b/tests/functional/__pycache__/test_deploy.cpython-37-pytest-5.3.2.pyc differ |
387 | diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py |
388 | new file mode 100644 |
389 | index 0000000..1d2d6e9 |
390 | --- /dev/null |
391 | +++ b/tests/functional/conftest.py |
392 | @@ -0,0 +1,67 @@ |
393 | +#!/usr/bin/python3 |
394 | +""" |
395 | +Reusable pytest fixtures for functional testing |
396 | + |
397 | +Environment variables |
398 | +--------------------- |
399 | + |
400 | +PYTEST_CLOUD_REGION, PYTEST_CLOUD_NAME: cloud name and region to use for juju model creation |
401 | + |
402 | +PYTEST_KEEP_MODEL: if set, the testing model won't be torn down at the end of the testing session |
403 | + |
404 | +""" |
405 | + |
406 | +import asyncio |
407 | +import os |
408 | +import uuid |
409 | +import pytest |
410 | +import subprocess |
411 | + |
412 | +from juju.controller import Controller |
413 | +from juju_tools import JujuTools |
414 | + |
415 | + |
416 | +@pytest.fixture(scope='module') |
417 | +def event_loop(): |
418 | + """Override the default pytest event loop to allow for fixtures using a broader scope""" |
419 | + loop = asyncio.get_event_loop_policy().new_event_loop() |
420 | + asyncio.set_event_loop(loop) |
421 | + loop.set_debug(True) |
422 | + yield loop |
423 | + loop.close() |
424 | + asyncio.set_event_loop(None) |
425 | + |
426 | + |
427 | +@pytest.fixture(scope='module') |
428 | +async def controller(): |
429 | + """Connect to the current controller""" |
430 | + _controller = Controller() |
431 | + await _controller.connect_current() |
432 | + yield _controller |
433 | + await _controller.disconnect() |
434 | + |
435 | + |
436 | +@pytest.fixture(scope='module') |
437 | +async def model(controller): |
438 | + """This model lives only for the duration of the test""" |
439 | + model_name = "functest-{}".format(str(uuid.uuid4())[-12:]) |
440 | + _model = await controller.add_model(model_name, |
441 | + cloud_name=os.getenv('PYTEST_CLOUD_NAME'), |
442 | + region=os.getenv('PYTEST_CLOUD_REGION'), |
443 | + ) |
444 | + # https://github.com/juju/python-libjuju/issues/267 |
445 | + subprocess.check_call(['juju', 'models']) |
446 | + while model_name not in await controller.list_models(): |
447 | + await asyncio.sleep(1) |
448 | + yield _model |
449 | + await _model.disconnect() |
450 | + if not os.getenv('PYTEST_KEEP_MODEL'): |
451 | + await controller.destroy_model(model_name) |
452 | + while model_name in await controller.list_models(): |
453 | + await asyncio.sleep(1) |
454 | + |
455 | + |
456 | +@pytest.fixture(scope='module') |
457 | +async def jujutools(controller, model): |
458 | + tools = JujuTools(controller, model) |
459 | + return tools |
460 | diff --git a/tests/functional/juju_tools.py b/tests/functional/juju_tools.py |
461 | new file mode 100644 |
462 | index 0000000..4b4884f |
463 | --- /dev/null |
464 | +++ b/tests/functional/juju_tools.py |
465 | @@ -0,0 +1,68 @@ |
466 | +import pickle |
467 | +import juju |
468 | +import base64 |
469 | + |
470 | +# from juju.errors import JujuError |
471 | + |
472 | + |
473 | +class JujuTools: |
474 | + def __init__(self, controller, model): |
475 | + self.controller = controller |
476 | + self.model = model |
477 | + |
478 | + async def run_command(self, cmd, target): |
479 | + """ |
480 | + Runs a command on a unit. |
481 | + |
482 | + :param cmd: Command to be run |
483 | + :param unit: Unit object or unit name string |
484 | + """ |
485 | + unit = ( |
486 | + target |
487 | + if isinstance(target, juju.unit.Unit) |
488 | + else await self.get_unit(target) |
489 | + ) |
490 | + action = await unit.run(cmd) |
491 | + return action.results |
492 | + |
493 | + async def remote_object(self, imports, remote_cmd, target): |
494 | + """ |
495 | + Runs command on target machine and returns a python object of the result |
496 | + |
497 | + :param imports: Imports needed for the command to run |
498 | + :param remote_cmd: The python command to execute |
499 | + :param target: Unit object or unit name string |
500 | + """ |
501 | + python3 = "python3 -c '{}'" |
502 | + python_cmd = ('import pickle;' |
503 | + 'import base64;' |
504 | + '{}' |
505 | + 'print(base64.b64encode(pickle.dumps({})), end="")' |
506 | + .format(imports, remote_cmd)) |
507 | + cmd = python3.format(python_cmd) |
508 | + results = await self.run_command(cmd, target) |
509 | + return pickle.loads(base64.b64decode(bytes(results['Stdout'][2:-1], 'utf8'))) |
510 | + |
511 | + async def file_stat(self, path, target): |
512 | + """ |
513 | + Runs stat on a file |
514 | + |
515 | + :param path: File path |
516 | + :param target: Unit object or unit name string |
517 | + """ |
518 | + imports = 'import os;' |
519 | + python_cmd = ('os.stat("{}")' |
520 | + .format(path)) |
521 | + print("Calling remote cmd: " + python_cmd) |
522 | + return await self.remote_object(imports, python_cmd, target) |
523 | + |
524 | + async def file_contents(self, path, target): |
525 | + """ |
526 | + Returns the contents of a file |
527 | + |
528 | + :param path: File path |
529 | + :param target: Unit object or unit name string |
530 | + """ |
531 | + cmd = 'cat {}'.format(path) |
532 | + result = await self.run_command(cmd, target) |
533 | + return result['Stdout'] |
534 | diff --git a/tests/functional/requirements.txt b/tests/functional/requirements.txt |
535 | new file mode 100644 |
536 | index 0000000..f76bfbb |
537 | --- /dev/null |
538 | +++ b/tests/functional/requirements.txt |
539 | @@ -0,0 +1,6 @@ |
540 | +flake8 |
541 | +juju |
542 | +mock |
543 | +pytest |
544 | +pytest-asyncio |
545 | +requests |
546 | diff --git a/tests/functional/test_deploy.py b/tests/functional/test_deploy.py |
547 | new file mode 100644 |
548 | index 0000000..bf28c17 |
549 | --- /dev/null |
550 | +++ b/tests/functional/test_deploy.py |
551 | @@ -0,0 +1,103 @@ |
552 | +import os |
553 | +import pytest |
554 | +import subprocess |
555 | +import stat |
556 | + |
557 | +# Treat all tests as coroutines |
558 | +pytestmark = pytest.mark.asyncio |
559 | + |
560 | +juju_repository = os.getenv('JUJU_REPOSITORY', '.').rstrip('/') |
561 | +series = ['xenial', |
562 | + 'bionic', |
563 | + pytest.param('eoan', marks=pytest.mark.xfail(reason='canary')), |
564 | + ] |
565 | +sources = [('local', '{}/builds/grafana'.format(juju_repository)), |
566 | + # ('jujucharms', 'cs:...'), |
567 | + ] |
568 | + |
569 | + |
570 | +# Uncomment for re-using the current model, useful for debugging functional tests |
571 | +# @pytest.fixture(scope='module') |
572 | +# async def model(): |
573 | +# from juju.model import Model |
574 | +# model = Model() |
575 | +# await model.connect_current() |
576 | +# yield model |
577 | +# await model.disconnect() |
578 | + |
579 | + |
580 | +# Custom fixtures |
581 | +@pytest.fixture(params=series) |
582 | +def series(request): |
583 | + return request.param |
584 | + |
585 | + |
586 | +@pytest.fixture(params=sources, ids=[s[0] for s in sources]) |
587 | +def source(request): |
588 | + return request.param |
589 | + |
590 | + |
591 | +@pytest.fixture |
592 | +async def app(model, series, source): |
593 | + app_name = 'grafana-{}-{}'.format(series, source[0]) |
594 | + return await model._wait_for_new('application', app_name) |
595 | + |
596 | + |
597 | +async def test_grafana_deploy(model, series, source, request): |
598 | + # Starts a deploy for each series |
599 | + # Using subprocess b/c libjuju fails with JAAS |
600 | + # https://github.com/juju/python-libjuju/issues/221 |
601 | + application_name = 'grafana-{}-{}'.format(series, source[0]) |
602 | + cmd = ['juju', 'deploy', source[1], '-m', model.info.name, |
603 | + '--series', series, application_name] |
604 | + if request.node.get_closest_marker('xfail'): |
605 | + # If series is 'xfail' force install to allow testing against versions not in |
606 | + # metadata.yaml |
607 | + cmd.append('--force') |
608 | + subprocess.check_call(cmd) |
609 | + |
610 | + |
611 | +async def test_charm_upgrade(model, app): |
612 | + if app.name.endswith('local'): |
613 | + pytest.skip("No need to upgrade the local deploy") |
614 | + unit = app.units[0] |
615 | + await model.block_until(lambda: unit.agent_status == 'idle') |
616 | + subprocess.check_call(['juju', |
617 | + 'upgrade-charm', |
618 | + '--switch={}'.format(sources[0][1]), |
619 | + '-m', model.info.name, |
620 | + app.name, |
621 | + ]) |
622 | + await model.block_until(lambda: unit.agent_status == 'executing') |
623 | + |
624 | + |
625 | +# Tests |
626 | +async def test_grafana_status(model, app): |
627 | + # Verifies status for all deployed series of the charm |
628 | + await model.block_until(lambda: app.status == 'active') |
629 | + unit = app.units[0] |
630 | + await model.block_until(lambda: unit.agent_status == 'idle') |
631 | + |
632 | + |
633 | +async def test_example_action(app): |
634 | + unit = app.units[0] |
635 | + action = await unit.run_action('example-action') |
636 | + action = await action.wait() |
637 | + assert action.status == 'completed' |
638 | + |
639 | + |
640 | +async def test_run_command(app, jujutools): |
641 | + unit = app.units[0] |
642 | + cmd = 'hostname -i' |
643 | + results = await jujutools.run_command(cmd, unit) |
644 | + assert results['Code'] == '0' |
645 | + assert unit.public_address in results['Stdout'] |
646 | + |
647 | + |
648 | +async def test_file_stat(app, jujutools): |
649 | + unit = app.units[0] |
650 | + path = '/var/lib/juju/agents/unit-{}/charm/metadata.yaml'.format(unit.entity_id.replace('/', '-')) |
651 | + fstat = await jujutools.file_stat(path, unit) |
652 | + assert stat.filemode(fstat.st_mode) == '-rw-r--r--' |
653 | + assert fstat.st_uid == 0 |
654 | + assert fstat.st_gid == 0 |
655 | diff --git a/tests/unit/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc b/tests/unit/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc |
656 | new file mode 100644 |
657 | index 0000000..f22ae79 |
658 | Binary files /dev/null and b/tests/unit/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc differ |
659 | diff --git a/tests/unit/__pycache__/test_actions.cpython-37-pytest-5.3.2.pyc b/tests/unit/__pycache__/test_actions.cpython-37-pytest-5.3.2.pyc |
660 | new file mode 100644 |
661 | index 0000000..dec8696 |
662 | Binary files /dev/null and b/tests/unit/__pycache__/test_actions.cpython-37-pytest-5.3.2.pyc differ |
663 | diff --git a/tests/unit/__pycache__/test_lib.cpython-37-pytest-5.3.2.pyc b/tests/unit/__pycache__/test_lib.cpython-37-pytest-5.3.2.pyc |
664 | new file mode 100644 |
665 | index 0000000..0ef154b |
666 | Binary files /dev/null and b/tests/unit/__pycache__/test_lib.cpython-37-pytest-5.3.2.pyc differ |
667 | diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py |
668 | new file mode 100644 |
669 | index 0000000..a2a5470 |
670 | --- /dev/null |
671 | +++ b/tests/unit/conftest.py |
672 | @@ -0,0 +1,69 @@ |
673 | +#!/usr/bin/python3 |
674 | +import mock |
675 | +import pytest |
676 | + |
677 | + |
678 | +# If layer options are used, add this to ${fixture} |
679 | +# and import layer in ${libfile} |
680 | +@pytest.fixture |
681 | +def mock_layers(monkeypatch): |
682 | + import sys |
683 | + sys.modules['charms.layer'] = mock.Mock() |
684 | + sys.modules['reactive'] = mock.Mock() |
685 | + # Mock any functions in layers that need to be mocked here |
686 | + |
687 | + def options(layer): |
688 | + # mock options for layers here |
689 | + if layer == 'example-layer': |
690 | + options = {'port': 9999} |
691 | + return options |
692 | + else: |
693 | + return None |
694 | + |
695 | + monkeypatch.setattr('${libfile}.layer.options', options) |
696 | + |
697 | + |
698 | +@pytest.fixture |
699 | +def mock_hookenv_config(monkeypatch): |
700 | + import yaml |
701 | + |
702 | + def mock_config(): |
703 | + cfg = {} |
704 | + yml = yaml.load(open('./config.yaml')) |
705 | + |
706 | + # Load all defaults |
707 | + for key, value in yml['options'].items(): |
708 | + cfg[key] = value['default'] |
709 | + |
710 | + # Manually add cfg from other layers |
711 | + # cfg['my-other-layer'] = 'mock' |
712 | + return cfg |
713 | + |
714 | + monkeypatch.setattr('${libfile}.hookenv.config', mock_config) |
715 | + |
716 | + |
717 | +@pytest.fixture |
718 | +def mock_remote_unit(monkeypatch): |
719 | + monkeypatch.setattr('${libfile}.hookenv.remote_unit', lambda: 'unit-mock/0') |
720 | + |
721 | + |
722 | +@pytest.fixture |
723 | +def mock_charm_dir(monkeypatch): |
724 | + monkeypatch.setattr('${libfile}.hookenv.charm_dir', lambda: '/mock/charm/dir') |
725 | + |
726 | + |
727 | +# @pytest.fixture |
728 | +# def ${fixture}(tmpdir, mock_hookenv_config, mock_charm_dir, monkeypatch): |
729 | +# from $libfile import $libclass |
730 | +# helper = ${libclass}() |
731 | +# |
732 | +# # Example config file patching |
733 | +# cfg_file = tmpdir.join('example.cfg') |
734 | +# with open('./tests/unit/example.cfg', 'r') as src_file: |
735 | +# cfg_file.write(src_file.read()) |
736 | +# helper.example_config_file = cfg_file.strpath |
737 | +# |
738 | +# # Any other functions that load helper will get this version |
739 | +# monkeypatch.setattr('${libfile}.${libclass}', lambda: helper) |
740 | +# |
741 | +# return helper |
742 | diff --git a/tests/unit/example.cfg b/tests/unit/example.cfg |
743 | new file mode 100644 |
744 | index 0000000..81b1e94 |
745 | --- /dev/null |
746 | +++ b/tests/unit/example.cfg |
747 | @@ -0,0 +1 @@ |
748 | +This is an example config file included with the unit tests |
749 | diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt |
750 | new file mode 100644 |
751 | index 0000000..9c685e5 |
752 | --- /dev/null |
753 | +++ b/tests/unit/requirements.txt |
754 | @@ -0,0 +1,5 @@ |
755 | +charmhelpers |
756 | +charms.reactive |
757 | +mock |
758 | +pytest |
759 | +pytest-cov |
760 | diff --git a/tests/unit/test_actions.py b/tests/unit/test_actions.py |
761 | new file mode 100644 |
762 | index 0000000..6e7cddb |
763 | --- /dev/null |
764 | +++ b/tests/unit/test_actions.py |
765 | @@ -0,0 +1,12 @@ |
766 | +import imp |
767 | + |
768 | +import mock |
769 | + |
770 | + |
771 | +class TestActions(): |
772 | + def test_example_action(self, my_action, monkeypatch): |
773 | + mock_function = mock.Mock() |
774 | + monkeypatch.setattr(my_action, 'action_function', mock_function) |
775 | + assert mock_function.call_count == 0 |
776 | + imp.load_source('action_function', './actions/example-action') |
777 | + assert mock_function.call_count == 1 |
778 | diff --git a/tests/unit/test_lib.py b/tests/unit/test_lib.py |
779 | new file mode 100644 |
780 | index 0000000..a7b2b08 |
781 | --- /dev/null |
782 | +++ b/tests/unit/test_lib.py |
783 | @@ -0,0 +1,12 @@ |
784 | +#!/usr/bin/python3 |
785 | + |
786 | + |
787 | +class TestLib(): |
788 | + def test_pytest(self): |
789 | + assert True |
790 | + |
791 | + def test_grafana(self, grafana): |
792 | + """See if the helper fixture works to load charm configs.""" |
793 | + assert isinstance(grafana.charm_config, dict) |
794 | + |
795 | + # Include tests for functions in ${libfile} |
796 | diff --git a/tox.ini b/tox.ini |
797 | new file mode 100644 |
798 | index 0000000..8b4adc3 |
799 | --- /dev/null |
800 | +++ b/tox.ini |
801 | @@ -0,0 +1,71 @@ |
802 | +[tox] |
803 | +skipsdist=True |
804 | +envlist = unit, functional |
805 | +skip_missing_interpreters = True |
806 | + |
807 | +[testenv] |
808 | +basepython = python3 |
809 | +setenv = |
810 | + PYTHONPATH = . |
811 | + |
812 | +[testenv:unit] |
813 | +commands = pytest -v --ignore {toxinidir}/tests/functional \ |
814 | + --cov=lib \ |
815 | + --cov=reactive \ |
816 | + --cov=actions \ |
817 | + --cov-report=term \ |
818 | + --cov-report=annotate:report/annotated \ |
819 | + --cov-report=html:report/html |
820 | +deps = -r{toxinidir}/tests/unit/requirements.txt |
821 | + -r{toxinidir}/requirements.txt |
822 | +setenv = PYTHONPATH={toxinidir}/lib |
823 | + |
824 | +[testenv:functional] |
825 | +passenv = |
826 | + HOME |
827 | + JUJU_REPOSITORY |
828 | + PATH |
829 | + PYTEST_KEEP_MODEL |
830 | + PYTEST_CLOUD_NAME |
831 | + PYTEST_CLOUD_REGION |
832 | +commands = pytest -v --ignore {toxinidir}/tests/unit |
833 | +deps = -r{toxinidir}/tests/functional/requirements.txt |
834 | + -r{toxinidir}/requirements.txt |
835 | + |
836 | +[testenv:lint] |
837 | +commands = flake8 |
838 | +deps = |
839 | + flake8 |
840 | + flake8-docstrings |
841 | + flake8-import-order |
842 | + pep8-naming |
843 | + flake8-colors |
844 | + |
845 | +[flake8] |
846 | +exclude = |
847 | + .git, |
848 | + __pycache__, |
849 | + .tox, |
850 | +# H405: Multi line docstrings should start with a one line summary followed by |
851 | +# an empty line. |
852 | +# D100: Missing docstring in public module |
853 | +# D101: Missing docstring in public class |
854 | +# D102: Missing docstring in public method |
855 | +# D103: Missing docstring in public function |
856 | +# D104: Missing docstring in public package |
857 | +# D105: Missing docstring in magic method |
858 | +# D107: Missing docstring in __init__ |
859 | +# D200: One-line docstring should fit on one line with quotes |
860 | +# D202: No blank lines allowed after function docstring |
861 | +# D203: 1 blank required before class docstring |
862 | +# D204: 1 blank line required after class docstring |
863 | +# D205: 1 blank line required between summary line and description |
864 | +# D208: Docstring is over-indented |
865 | +# D400: First line should end with a period |
866 | +# D401: First line should be in imperative mood |
867 | +# I201: Missing newline between import groups |
868 | +# I100: Import statements are in the wrong order |
869 | + |
870 | +ignore = H405,D100,D101,D102,D103,D104,D105,D107,D200,D202,D203,D204,D205,D208,D400,D401,I100,I201 |
871 | +max-line-length = 120 |
872 | +max-complexity = 10 |
873 | diff --git a/wheelhouse.txt b/wheelhouse.txt |
874 | index f229360..ffdadef 100644 |
875 | --- a/wheelhouse.txt |
876 | +++ b/wheelhouse.txt |
877 | @@ -1 +1,2 @@ |
878 | requests |
879 | +jsondiff |
This merge proposal is being monitored by mergebot. Change the status to Approved to merge.