Merge ~afreiberger/charm-grafana:add_tests_and_linting into charm-grafana:master

Proposed by Drew Freiberger
Status: Superseded
Proposed branch: ~afreiberger/charm-grafana:add_tests_and_linting
Merge into: charm-grafana:master
Diff against target: 879 lines (+583/-53)
16 files modified
.gitignore (+22/-0)
Makefile (+49/-0)
lib/charms/layer/grafana.py (+2/-3)
reactive/grafana.py (+94/-50)
requirements.txt (+1/-0)
tests/functional/conftest.py (+67/-0)
tests/functional/juju_tools.py (+68/-0)
tests/functional/requirements.txt (+6/-0)
tests/functional/test_deploy.py (+103/-0)
tests/unit/conftest.py (+69/-0)
tests/unit/example.cfg (+1/-0)
tests/unit/requirements.txt (+5/-0)
tests/unit/test_actions.py (+12/-0)
tests/unit/test_lib.py (+12/-0)
tox.ini (+71/-0)
wheelhouse.txt (+1/-0)
Reviewer                  Review Type    Date Requested    Status
Canonical IS Reviewers                                     Pending
Llama (LMA) Charmers                                       Pending
Review via email: mp+377471@code.launchpad.net

This proposal has been superseded by a proposal from 2020-01-11.

🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:

This merge proposal is being monitored by mergebot. Change the status to Approved to merge.

🤖 Canonical IS Merge Bot (canonical-is-mergebot) wrote:

Unable to determine commit message from repository - please click "Set commit message" and enter the commit message manually.

Unmerged commits

d3370e1... by Drew Freiberger

Added testing and resolved lint errors

WIP

f2a62d8... by Drew Freiberger

Check dashboards before uploading new revisions

Dashboards were being re-uploaded every five minutes during update-status,
creating unbounded revision history and causing the grafana.db
configuration database to balloon. To eliminate this, we now check
whether the rendered dashboard template already matches the version
stored in the grafana database before uploading a new revision.

The dashboard function has also been refactored to resolve
complexity warnings.

Closes-Bug: 1858490
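
For reference, the compare-before-upload idea described above can be sketched as follows. This is a minimal illustration of the approach, not the charm's actual code: the helper name, credentials, and Grafana port are placeholders (the charm itself reads the port from config('port')).

    import json

    import requests
    from jsondiff import diff

    GRAFANA = 'http://127.0.0.1:3000'  # placeholder port
    AUTH = ('admin', 'secret')         # placeholder credentials

    def upload_if_changed(rendered_json_str):
        """POST a rendered dashboard only when it differs from what Grafana already stores."""
        new = json.loads(rendered_json_str)['dashboard']

        # Look up the existing dashboard's uid by matching titles.
        search = requests.get(GRAFANA + '/api/search',
                              params={'type': 'dash-db'}, auth=AUTH).json()
        uid = next((d['uid'] for d in search if d.get('title') == new.get('title')), None)

        current = {'version': 0}
        if uid:
            resp = requests.get('{}/api/dashboards/uid/{}'.format(GRAFANA, uid), auth=AUTH)
            if resp.status_code == 200:
                current = resp.json()['dashboard']

        # The version field always differs between a freshly rendered template
        # and a stored dashboard, so drop it before comparing.
        new.pop('version', None)
        current.pop('version', None)

        if not diff(new, current):
            return  # content is identical; skip the upload so no new revision is created
        requests.post(GRAFANA + '/api/dashboards/db',
                      json={'dashboard': new, 'overwrite': True}, auth=AUTH)

Comparing parsed JSON with jsondiff (which this proposal adds to wheelhouse.txt) rather than comparing raw strings means key ordering and formatting differences between the template and the stored dashboard do not trigger a new upload.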

Preview Diff

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..32e2995
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,22 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# Log files
+*.log
+
+.tox/
+.coverage
+
+# vi
+.*.swp
+
+# pycharm
+.idea/
+
+# version data
+repo-info
+
+# reports
+report/*
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..b357248
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,49 @@
+help:
+	@echo "This project supports the following targets"
+	@echo ""
+	@echo " make help - show this text"
+	@echo " make submodules - make sure that the submodules are up-to-date"
+	@echo " make lint - run flake8"
+	@echo " make test - run the unittests and lint"
+	@echo " make unittest - run the tests defined in the unittest subdirectory"
+	@echo " make functional - run the tests defined in the functional subdirectory"
+	@echo " make release - build the charm"
+	@echo " make clean - remove unneeded files"
+	@echo ""
+
+submodules:
+	@echo "Cloning submodules"
+	@git submodule update --init --recursive
+
+lint:
+	@echo "Running flake8"
+	@tox -e lint
+
+test: lint unittest functional
+
+unittest:
+	@tox -e unit
+
+functional: build
+	@PYTEST_KEEP_MODEL=$(PYTEST_KEEP_MODEL) \
+	PYTEST_CLOUD_NAME=$(PYTEST_CLOUD_NAME) \
+	PYTEST_CLOUD_REGION=$(PYTEST_CLOUD_REGION) \
+	tox -e functional
+
+build:
+	@echo "Building charm to base directory $(JUJU_REPOSITORY)"
+	@-git describe --tags > ./repo-info
+	@CHARM_LAYERS_DIR=./layers CHARM_INTERFACES_DIR=./interfaces TERM=linux \
+	JUJU_REPOSITORY=$(JUJU_REPOSITORY) charm build . --force
+
+release: clean build
+	@echo "Charm is built at $(JUJU_REPOSITORY)/builds"
+
+clean:
+	@echo "Cleaning files"
+	@if [ -d .tox ] ; then rm -r .tox ; fi
+	@if [ -d .pytest_cache ] ; then rm -r .pytest_cache ; fi
+	@find . -iname __pycache__ -exec rm -r {} +
+
+# The targets below don't depend on a file
+.PHONY: lint test unittest functional build release clean help submodules
diff --git a/lib/charms/layer/grafana.py b/lib/charms/layer/grafana.py
index b482203..67b53ea 100644
--- a/lib/charms/layer/grafana.py
+++ b/lib/charms/layer/grafana.py
@@ -2,13 +2,12 @@
 
 import json
 import requests
+from charmhelpers.core import unitdata
 from charmhelpers.core.hookenv import (
     config,
     log,
 )
 
-from charmhelpers.core import unitdata
-
 
 def get_admin_password():
     kv = unitdata.kv()
@@ -30,7 +29,7 @@ def import_dashboard(dashboard, name=None):
     name = dashboard['dashboard'].get('title') or 'Untitled'
     headers = {'Content-Type': 'application/json'}
     import_url = 'http://localhost:{}/api/dashboards/db'.format(
-            config('port'))
+        config('port'))
     passwd = get_admin_password()
     if passwd is None:
         return (False, 'Unable to retrieve grafana password.')
diff --git a/reactive/grafana.py b/reactive/grafana.py
index e1dc444..3f24f0a 100644
--- a/reactive/grafana.py
+++ b/reactive/grafana.py
@@ -4,12 +4,11 @@ import glob
 import json
 import os
 import re
-import requests
 import shutil
-import six
 import subprocess
 import time
 
+from charmhelpers import fetch
 from charmhelpers.contrib.charmsupport import nrpe
 from charmhelpers.core import (
     hookenv,
@@ -17,11 +16,9 @@ from charmhelpers.core import (
     unitdata,
 )
 from charmhelpers.core.templating import render
-from charmhelpers import fetch
-from charms.reactive.helpers import (
-    any_file_changed,
-    is_state,
-)
+
+from charms.layer import snap
+from charms.layer.grafana import import_dashboard
 from charms.reactive import (
     hook,
     remove_state,
@@ -29,11 +26,20 @@ from charms.reactive import (
     when,
     when_not,
 )
+from charms.reactive.helpers import (
+    any_file_changed,
+    is_state,
+)
 
-from charms.layer import snap
-from charms.layer.grafana import import_dashboard
 from jinja2 import Environment, FileSystemLoader, exceptions
 
+from jsondiff import diff
+
+import requests
+
+import six
+
+
 SVCNAME = {'snap': 'snap.grafana.grafana',
            'apt': 'grafana-server'}
 SNAP_NAME = 'grafana'
@@ -119,8 +125,7 @@ def install_packages():
         set_state('grafana.installed')
         hookenv.status_set('active', 'Completed installing grafana')
     elif source == 'snap' and \
-            (host.lsb_release()['DISTRIB_CODENAME'] >= 'xenial' or
-             host.lsb_release()['DISTRIB_CODENAME'] < 'p'):
+            (host.lsb_release()['DISTRIB_CODENAME'] >= 'xenial' or host.lsb_release()['DISTRIB_CODENAME'] < 'p'):
         # NOTE(aluria): precise is the last supported Ubuntu release, so
         # anything below 'p' is actually newer than xenial (systemd support)
         snap.install(SNAP_NAME, channel=channel, force_dangerous=False)
@@ -432,8 +437,9 @@ def configure_website(website):
 
 
 def validate_datasources():
-    """TODO: make sure datasources option is merged with
-    relation data
+    """Verify that datasources configuration is valid, if existing.
+
+    TODO: make sure datasources option is merged with relation data
     TODO: make sure datasources are validated
     """
     config = hookenv.config()
@@ -447,7 +453,8 @@ def validate_datasources():
 
 
 def check_datasource(ds):
-    """
+    """Check for and add datasources not currently in grafana DB.
+
     CREATE TABLE `data_source` (
     `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL
     , `org_id` INTEGER NOT NULL
@@ -469,7 +476,6 @@ def check_datasource(ds):
     , `with_credentials` INTEGER NOT NULL DEFAULT 0);
     INSERT INTO "data_source" VALUES(1,1,0,'prometheus','BootStack Prometheus','proxy','http://localhost:9090','','','',0,'','',1,'{}','2016-01-22 12:11:06','2016-01-22 12:11:11',0);
     """  # noqa E501
-
     # ds will be similar to:
     # {'service_name': 'prometheus',
     #  'url': 'http://10.0.3.216:9090',
@@ -504,9 +510,8 @@ def check_datasource(ds):
 
 # This isn't exposed in charmhelpers: https://github.com/juju/charm-helpers/issues/367
 def render_custom(source, context, **parameters):
-    """
-    Renders a template from the template folder with custom environment
-    parameters.
+    """Render a template from the template folder with custom environment parameters.
+
     source: template file name to render from
     context: template context variables
     parameters: initialization parameters for the jinja Environment
@@ -523,6 +528,69 @@ def render_custom(source, context, **parameters):
     return template.render(context)
 
 
+def get_current_dashboards(port, passwd):
+    # Get current dashboards
+    get_req = 'http://127.0.0.1:{}/api/search?type=dash-db'.format(port)
+    response = requests.get(get_req, auth=('admin', passwd))
+    if response.status_code == 200:
+        return response.json()
+    return []
+
+
+def get_current_dashboard_json(uid, port, passwd):
+    if uid is not None:
+        response = requests.get("http://127.0.0.1:{}/api/dashboards/uid/{}".format(port, uid), auth=('admin', passwd))
+        if response.status_code == 200:
+            try:
+                return json.loads(response.text)["dashboard"]
+            except json.decoder.JSONDecodeError:
+                pass
+    return json.loads('{"version": 0}')
+
+
+def check_and_add_dashboard(filename, context, prom_metrics, dash_to_uid, port, gf_adminpasswd):
+    dashboard_str = render_custom(source=filename,
+                                  context=context,
+                                  variable_start_string="<<",
+                                  variable_end_string=">>",
+                                  )
+    hookenv.log("Checking Dashboard Template: {}".format(filename))
+    expr = str(re.findall('"expr":(.*),', dashboard_str))
+    metrics = set(re.findall('[a-zA-Z0-9]*_[a-zA-Z0-9_]*', expr))
+    if not metrics:
+        hookenv.log("Skipping Dashboard Template: {} no metrics in template"
+                    " {}".format(filename, metrics))
+        return
+
+    missing_metrics = set([x for x in metrics if x not in prom_metrics])
+    if missing_metrics:
+        hookenv.log("Skipping Dashboard Template: {} missing {} metrics."
+                    "Missing: {}".format(filename, len(missing_metrics),
+                                         ', '.join(missing_metrics)
+                                         ), hookenv.DEBUG)
+        return
+
+    dashboard_json = json.loads(dashboard_str)
+    # before uploading the dashboard, we should check that it doesn't already exist with same data lp#1858490
+    new = dashboard_json["dashboard"]
+    curr = get_current_dashboard_json(dash_to_uid.get(dashboard_json["dashboard"]["title"], None), port, gf_adminpasswd)
+    # must remove the versions as they will likely be different
+    del new["version"]
+    del curr["version"]
+    json_diff_result = diff(new, curr)
+    if json_diff_result:
+        hookenv.log("Skipping Dashboard Template: already up to date: {}".format(filename))
+        return
+
+    hookenv.log("Using Dashboard Template: {}".format(filename))
+    post_req = 'http://127.0.0.1:{}/api/dashboards/db'.format(port)
+    r = requests.post(post_req, json=dashboard_json, auth=('admin', gf_adminpasswd))
+
+    if r.status_code != 200:
+        hookenv.log("Posting template {} failed with error:"
+                    " {}".format(filename, r.text), 'ERROR')
+
+
 def generate_prometheus_dashboards(gf_adminpasswd, ds):
     # prometheus_host = ds
     ds_name = '{} - {}'.format(ds['service_name'], ds['description'])
@@ -534,9 +602,11 @@ def generate_prometheus_dashboards(gf_adminpasswd, ds):
                     ' {}'.format(response.status_code), 'ERROR')
         return
 
+    current_dashboards = get_current_dashboards(config["port"], gf_adminpasswd)
+    dash_to_uid = {dash['title']: dash['uid'] for dash in current_dashboards if 'title' in dash and 'uid' in dash}
+
     prom_metrics = response.json()['data']
     templates_dir = 'templates/dashboards/prometheus'
-    post_req = 'http://127.0.0.1:{}/api/dashboards/db'.format(config['port'])
     context = {'datasource': ds_name,
                'external_network': config['external_network'],
                'bcache_enabled': "bcache_cache_hit_ratio" in prom_metrics,
@@ -551,33 +621,9 @@ def generate_prometheus_dashboards(gf_adminpasswd, ds):
                       'ip_status', 'neutron_net', config['external_network'],
                       'neutron_public_ip_usage']
     prom_metrics.extend(ignore_metrics)
-    for filename in os.listdir(templates_dir):
-        dashboard_str = render_custom(source=filename,
-                                      context=context,
-                                      variable_start_string="<<",
-                                      variable_end_string=">>",
-                                      )
-        hookenv.log("Checking Dashboard Template: {}".format(filename))
-        expr = str(re.findall('"expr":(.*),', dashboard_str))
-        metrics = set(re.findall('[a-zA-Z0-9]*_[a-zA-Z0-9_]*', expr))
-        if not metrics:
-            hookenv.log("Skipping Dashboard Template: {} no metrics in template"
-                        " {}".format(filename, metrics))
-            continue
-        missing_metrics = set([x for x in metrics if x not in prom_metrics])
-        if missing_metrics:
-            hookenv.log("Skipping Dashboard Template: {} missing {} metrics."
-                        "Missing: {}".format(filename, len(missing_metrics),
-                                             ', '.join(missing_metrics)
-                                             ), hookenv.DEBUG)
-        else:
-            hookenv.log("Using Dashboard Template: {}".format(filename))
-            dashboard_json = json.loads(dashboard_str)
-            r = requests.post(post_req, json=dashboard_json, auth=('admin', gf_adminpasswd))
 
-        if r.status_code != 200:
-            hookenv.log("Posting template {} failed with error:"
-                        " {}".format(filename, r.text), 'ERROR')
+    for filename in os.listdir(templates_dir):
+        check_and_add_dashboard(filename, context, prom_metrics, dash_to_uid, config["port"], gf_adminpasswd)
 
 
 def generate_query(ds, is_default, id=None):
@@ -661,7 +707,8 @@ def generate_query(ds, is_default, id=None):
 @when('grafana.started')
 @when_not('grafana.admin_password.set')
 def check_adminuser():
-    """
+    """Create Adminuser if not existing.
+
     CREATE TABLE `user` (
     `id` INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL
     , `version` INTEGER NOT NULL
@@ -681,7 +728,6 @@ def check_adminuser():
     );
     INSERT INTO "user" VALUES(1,0,'admin','root+bootstack-ps45@canonical.com','BootStack Team','309bc4e78bc60d02dc0371d9e9fa6bf9a809d5dc25c745b9e3f85c3ed49c6feccd4ffc96d1db922f4297663a209e93f7f2b6','LZeJ3nSdrC','hseJcLcnPN','',1,1,0,'light','2016-01-22 12:00:08','2016-01-22 12:02:13');
     """  # noqa E501
-
     # XXX: If you add any dependencies on config items here,
     # be sure to update config_changed() accordingly!
 
@@ -717,9 +763,7 @@ def check_adminuser():
     query = cur.execute('SELECT id, login, salt FROM user')
     for row in query.fetchall():
         if row[1] == 'admin':
-            nagios_context = config.get('nagios_context', False)
-            if not nagios_context:
-                nagios_context = 'UNKNOWN'
+            nagios_context = config.get('nagios_context', 'UNKNOWN')
             email = 'root+%s@canonical.com' % nagios_context
             hpasswd = hpwgen(passwd, row[2])
             if hpasswd:
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..8462291
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+# Include python requirements here
diff --git a/tests/functional/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc b/tests/functional/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc
new file mode 100644
index 0000000..54c4d14
Binary files /dev/null and b/tests/functional/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc differ
diff --git a/tests/functional/__pycache__/juju_tools.cpython-37.pyc b/tests/functional/__pycache__/juju_tools.cpython-37.pyc
new file mode 100644
index 0000000..ce8ffdc
Binary files /dev/null and b/tests/functional/__pycache__/juju_tools.cpython-37.pyc differ
diff --git a/tests/functional/__pycache__/test_deploy.cpython-37-pytest-5.3.2.pyc b/tests/functional/__pycache__/test_deploy.cpython-37-pytest-5.3.2.pyc
new file mode 100644
index 0000000..27b64d7
Binary files /dev/null and b/tests/functional/__pycache__/test_deploy.cpython-37-pytest-5.3.2.pyc differ
diff --git a/tests/functional/conftest.py b/tests/functional/conftest.py
new file mode 100644
index 0000000..1d2d6e9
--- /dev/null
+++ b/tests/functional/conftest.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python3
+"""
+Reusable pytest fixtures for functional testing
+
+Environment variables
+---------------------
+
+PYTEST_CLOUD_REGION, PYTEST_CLOUD_NAME: cloud name and region to use for juju model creation
+
+PYTEST_KEEP_MODEL: if set, the testing model won't be torn down at the end of the testing session
+
+"""
+
+import asyncio
+import os
+import uuid
+import pytest
+import subprocess
+
+from juju.controller import Controller
+from juju_tools import JujuTools
+
+
+@pytest.fixture(scope='module')
+def event_loop():
+    """Override the default pytest event loop to allow for fixtures using a broader scope"""
+    loop = asyncio.get_event_loop_policy().new_event_loop()
+    asyncio.set_event_loop(loop)
+    loop.set_debug(True)
+    yield loop
+    loop.close()
+    asyncio.set_event_loop(None)
+
+
+@pytest.fixture(scope='module')
+async def controller():
+    """Connect to the current controller"""
+    _controller = Controller()
+    await _controller.connect_current()
+    yield _controller
+    await _controller.disconnect()
+
+
+@pytest.fixture(scope='module')
+async def model(controller):
+    """This model lives only for the duration of the test"""
+    model_name = "functest-{}".format(str(uuid.uuid4())[-12:])
+    _model = await controller.add_model(model_name,
+                                        cloud_name=os.getenv('PYTEST_CLOUD_NAME'),
+                                        region=os.getenv('PYTEST_CLOUD_REGION'),
+                                        )
+    # https://github.com/juju/python-libjuju/issues/267
+    subprocess.check_call(['juju', 'models'])
+    while model_name not in await controller.list_models():
+        await asyncio.sleep(1)
+    yield _model
+    await _model.disconnect()
+    if not os.getenv('PYTEST_KEEP_MODEL'):
+        await controller.destroy_model(model_name)
+        while model_name in await controller.list_models():
+            await asyncio.sleep(1)
+
+
+@pytest.fixture(scope='module')
+async def jujutools(controller, model):
+    tools = JujuTools(controller, model)
+    return tools
diff --git a/tests/functional/juju_tools.py b/tests/functional/juju_tools.py
new file mode 100644
index 0000000..4b4884f
--- /dev/null
+++ b/tests/functional/juju_tools.py
@@ -0,0 +1,68 @@
+import pickle
+import juju
+import base64
+
+# from juju.errors import JujuError
+
+
+class JujuTools:
+    def __init__(self, controller, model):
+        self.controller = controller
+        self.model = model
+
+    async def run_command(self, cmd, target):
+        """
+        Runs a command on a unit.
+
+        :param cmd: Command to be run
+        :param unit: Unit object or unit name string
+        """
+        unit = (
+            target
+            if isinstance(target, juju.unit.Unit)
+            else await self.get_unit(target)
+        )
+        action = await unit.run(cmd)
+        return action.results
+
+    async def remote_object(self, imports, remote_cmd, target):
+        """
+        Runs command on target machine and returns a python object of the result
+
+        :param imports: Imports needed for the command to run
+        :param remote_cmd: The python command to execute
+        :param target: Unit object or unit name string
+        """
+        python3 = "python3 -c '{}'"
+        python_cmd = ('import pickle;'
+                      'import base64;'
+                      '{}'
+                      'print(base64.b64encode(pickle.dumps({})), end="")'
+                      .format(imports, remote_cmd))
+        cmd = python3.format(python_cmd)
+        results = await self.run_command(cmd, target)
+        return pickle.loads(base64.b64decode(bytes(results['Stdout'][2:-1], 'utf8')))
+
+    async def file_stat(self, path, target):
+        """
+        Runs stat on a file
+
+        :param path: File path
+        :param target: Unit object or unit name string
+        """
+        imports = 'import os;'
+        python_cmd = ('os.stat("{}")'
+                      .format(path))
+        print("Calling remote cmd: " + python_cmd)
+        return await self.remote_object(imports, python_cmd, target)
+
+    async def file_contents(self, path, target):
+        """
+        Returns the contents of a file
+
+        :param path: File path
+        :param target: Unit object or unit name string
+        """
+        cmd = 'cat {}'.format(path)
+        result = await self.run_command(cmd, target)
+        return result['Stdout']
diff --git a/tests/functional/requirements.txt b/tests/functional/requirements.txt
new file mode 100644
index 0000000..f76bfbb
--- /dev/null
+++ b/tests/functional/requirements.txt
@@ -0,0 +1,6 @@
+flake8
+juju
+mock
+pytest
+pytest-asyncio
+requests
diff --git a/tests/functional/test_deploy.py b/tests/functional/test_deploy.py
new file mode 100644
index 0000000..bf28c17
--- /dev/null
+++ b/tests/functional/test_deploy.py
@@ -0,0 +1,103 @@
+import os
+import pytest
+import subprocess
+import stat
+
+# Treat all tests as coroutines
+pytestmark = pytest.mark.asyncio
+
+juju_repository = os.getenv('JUJU_REPOSITORY', '.').rstrip('/')
+series = ['xenial',
+          'bionic',
+          pytest.param('eoan', marks=pytest.mark.xfail(reason='canary')),
+          ]
+sources = [('local', '{}/builds/grafana'.format(juju_repository)),
+           # ('jujucharms', 'cs:...'),
+           ]
+
+
+# Uncomment for re-using the current model, useful for debugging functional tests
+# @pytest.fixture(scope='module')
+# async def model():
+#     from juju.model import Model
+#     model = Model()
+#     await model.connect_current()
+#     yield model
+#     await model.disconnect()
+
+
+# Custom fixtures
+@pytest.fixture(params=series)
+def series(request):
+    return request.param
+
+
+@pytest.fixture(params=sources, ids=[s[0] for s in sources])
+def source(request):
+    return request.param
+
+
+@pytest.fixture
+async def app(model, series, source):
+    app_name = 'grafana-{}-{}'.format(series, source[0])
+    return await model._wait_for_new('application', app_name)
+
+
+async def test_grafana_deploy(model, series, source, request):
+    # Starts a deploy for each series
+    # Using subprocess b/c libjuju fails with JAAS
+    # https://github.com/juju/python-libjuju/issues/221
+    application_name = 'grafana-{}-{}'.format(series, source[0])
+    cmd = ['juju', 'deploy', source[1], '-m', model.info.name,
+           '--series', series, application_name]
+    if request.node.get_closest_marker('xfail'):
+        # If series is 'xfail' force install to allow testing against versions not in
+        # metadata.yaml
+        cmd.append('--force')
+    subprocess.check_call(cmd)
+
+
+async def test_charm_upgrade(model, app):
+    if app.name.endswith('local'):
+        pytest.skip("No need to upgrade the local deploy")
+    unit = app.units[0]
+    await model.block_until(lambda: unit.agent_status == 'idle')
+    subprocess.check_call(['juju',
+                           'upgrade-charm',
+                           '--switch={}'.format(sources[0][1]),
+                           '-m', model.info.name,
+                           app.name,
+                           ])
+    await model.block_until(lambda: unit.agent_status == 'executing')
+
+
+# Tests
+async def test_grafana_status(model, app):
+    # Verifies status for all deployed series of the charm
+    await model.block_until(lambda: app.status == 'active')
+    unit = app.units[0]
+    await model.block_until(lambda: unit.agent_status == 'idle')
+
+
+async def test_example_action(app):
+    unit = app.units[0]
+    action = await unit.run_action('example-action')
+    action = await action.wait()
+    assert action.status == 'completed'
+
+
+async def test_run_command(app, jujutools):
+    unit = app.units[0]
+    cmd = 'hostname -i'
+    results = await jujutools.run_command(cmd, unit)
+    assert results['Code'] == '0'
+    assert unit.public_address in results['Stdout']
+
+
+async def test_file_stat(app, jujutools):
+    unit = app.units[0]
+    path = '/var/lib/juju/agents/unit-{}/charm/metadata.yaml'.format(unit.entity_id.replace('/', '-'))
+    fstat = await jujutools.file_stat(path, unit)
+    assert stat.filemode(fstat.st_mode) == '-rw-r--r--'
+    assert fstat.st_uid == 0
+    assert fstat.st_gid == 0
diff --git a/tests/unit/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc b/tests/unit/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc
new file mode 100644
index 0000000..f22ae79
Binary files /dev/null and b/tests/unit/__pycache__/conftest.cpython-37-pytest-5.3.2.pyc differ
diff --git a/tests/unit/__pycache__/test_actions.cpython-37-pytest-5.3.2.pyc b/tests/unit/__pycache__/test_actions.cpython-37-pytest-5.3.2.pyc
new file mode 100644
index 0000000..dec8696
Binary files /dev/null and b/tests/unit/__pycache__/test_actions.cpython-37-pytest-5.3.2.pyc differ
diff --git a/tests/unit/__pycache__/test_lib.cpython-37-pytest-5.3.2.pyc b/tests/unit/__pycache__/test_lib.cpython-37-pytest-5.3.2.pyc
new file mode 100644
index 0000000..0ef154b
Binary files /dev/null and b/tests/unit/__pycache__/test_lib.cpython-37-pytest-5.3.2.pyc differ
diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py
new file mode 100644
index 0000000..a2a5470
--- /dev/null
+++ b/tests/unit/conftest.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python3
+import mock
+import pytest
+
+
+# If layer options are used, add this to ${fixture}
+# and import layer in ${libfile}
+@pytest.fixture
+def mock_layers(monkeypatch):
+    import sys
+    sys.modules['charms.layer'] = mock.Mock()
+    sys.modules['reactive'] = mock.Mock()
+    # Mock any functions in layers that need to be mocked here
+
+    def options(layer):
+        # mock options for layers here
+        if layer == 'example-layer':
+            options = {'port': 9999}
+            return options
+        else:
+            return None
+
+    monkeypatch.setattr('${libfile}.layer.options', options)
+
+
+@pytest.fixture
+def mock_hookenv_config(monkeypatch):
+    import yaml
+
+    def mock_config():
+        cfg = {}
+        yml = yaml.load(open('./config.yaml'))
+
+        # Load all defaults
+        for key, value in yml['options'].items():
+            cfg[key] = value['default']
+
+        # Manually add cfg from other layers
+        # cfg['my-other-layer'] = 'mock'
+        return cfg
+
+    monkeypatch.setattr('${libfile}.hookenv.config', mock_config)
+
+
+@pytest.fixture
+def mock_remote_unit(monkeypatch):
+    monkeypatch.setattr('${libfile}.hookenv.remote_unit', lambda: 'unit-mock/0')
+
+
+@pytest.fixture
+def mock_charm_dir(monkeypatch):
+    monkeypatch.setattr('${libfile}.hookenv.charm_dir', lambda: '/mock/charm/dir')
+
+
+# @pytest.fixture
+# def ${fixture}(tmpdir, mock_hookenv_config, mock_charm_dir, monkeypatch):
+#     from $libfile import $libclass
+#     helper = ${libclass}()
+#
+#     # Example config file patching
+#     cfg_file = tmpdir.join('example.cfg')
+#     with open('./tests/unit/example.cfg', 'r') as src_file:
+#         cfg_file.write(src_file.read())
+#     helper.example_config_file = cfg_file.strpath
+#
+#     # Any other functions that load helper will get this version
+#     monkeypatch.setattr('${libfile}.${libclass}', lambda: helper)
+#
+#     return helper
diff --git a/tests/unit/example.cfg b/tests/unit/example.cfg
new file mode 100644
index 0000000..81b1e94
--- /dev/null
+++ b/tests/unit/example.cfg
@@ -0,0 +1 @@
+This is an example config file included with the unit tests
diff --git a/tests/unit/requirements.txt b/tests/unit/requirements.txt
new file mode 100644
index 0000000..9c685e5
--- /dev/null
+++ b/tests/unit/requirements.txt
@@ -0,0 +1,5 @@
+charmhelpers
+charms.reactive
+mock
+pytest
+pytest-cov
diff --git a/tests/unit/test_actions.py b/tests/unit/test_actions.py
new file mode 100644
index 0000000..6e7cddb
--- /dev/null
+++ b/tests/unit/test_actions.py
@@ -0,0 +1,12 @@
+import imp
+
+import mock
+
+
+class TestActions():
+    def test_example_action(self, my_action, monkeypatch):
+        mock_function = mock.Mock()
+        monkeypatch.setattr(my_action, 'action_function', mock_function)
+        assert mock_function.call_count == 0
+        imp.load_source('action_function', './actions/example-action')
+        assert mock_function.call_count == 1
diff --git a/tests/unit/test_lib.py b/tests/unit/test_lib.py
new file mode 100644
index 0000000..a7b2b08
--- /dev/null
+++ b/tests/unit/test_lib.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+
+
+class TestLib():
+    def test_pytest(self):
+        assert True
+
+    def test_grafana(self, grafana):
+        """See if the helper fixture works to load charm configs."""
+        assert isinstance(grafana.charm_config, dict)
+
+    # Include tests for functions in ${libfile}
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..8b4adc3
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,71 @@
+[tox]
+skipsdist=True
+envlist = unit, functional
+skip_missing_interpreters = True
+
+[testenv]
+basepython = python3
+setenv =
+  PYTHONPATH = .
+
+[testenv:unit]
+commands = pytest -v --ignore {toxinidir}/tests/functional \
+           --cov=lib \
+           --cov=reactive \
+           --cov=actions \
+           --cov-report=term \
+           --cov-report=annotate:report/annotated \
+           --cov-report=html:report/html
+deps = -r{toxinidir}/tests/unit/requirements.txt
+       -r{toxinidir}/requirements.txt
+setenv = PYTHONPATH={toxinidir}/lib
+
+[testenv:functional]
+passenv =
+  HOME
+  JUJU_REPOSITORY
+  PATH
+  PYTEST_KEEP_MODEL
+  PYTEST_CLOUD_NAME
+  PYTEST_CLOUD_REGION
+commands = pytest -v --ignore {toxinidir}/tests/unit
+deps = -r{toxinidir}/tests/functional/requirements.txt
+       -r{toxinidir}/requirements.txt
+
+[testenv:lint]
+commands = flake8
+deps =
+    flake8
+    flake8-docstrings
+    flake8-import-order
+    pep8-naming
+    flake8-colors
+
+[flake8]
+exclude =
+    .git,
+    __pycache__,
+    .tox,
+# H405: Multi line docstrings should start with a one line summary followed by
+# an empty line.
+# D100: Missing docstring in public module
+# D101: Missing docstring in public class
+# D102: Missing docstring in public method
+# D103: Missing docstring in public function
+# D104: Missing docstring in public package
+# D105: Missing docstring in magic method
+# D107: Missing docstring in __init__
+# D200: One-line docstring should fit on one line with quotes
+# D202: No blank lines allowed after function docstring
+# D203: 1 blank required before class docstring
+# D204: 1 blank line required after class docstring
+# D205: 1 blank line required between summary line and description
+# D208: Docstring is over-indented
+# D400: First line should end with a period
+# D401: First line should be in imperative mood
+# I201: Missing newline between import groups
+# I100: Import statements are in the wrong order
+
+ignore = H405,D100,D101,D102,D103,D104,D105,D107,D200,D202,D203,D204,D205,D208,D400,D401,I100,I201
+max-line-length = 120
+max-complexity = 10
diff --git a/wheelhouse.txt b/wheelhouse.txt
index f229360..ffdadef 100644
--- a/wheelhouse.txt
+++ b/wheelhouse.txt
@@ -1 +1,2 @@
 requests
+jsondiff
