Merge ~afreiberger/charm-hw-health:blacken-20.08 into charm-hw-health:master
Proposed by: Drew Freiberger
| Status: | Merged |
|---|---|
| Merged at revision: | 410c7d4c6a87b86a2e2b8209d2c3f739877bda48 |
| Proposed branch: | ~afreiberger/charm-hw-health:blacken-20.08 |
| Merge into: | charm-hw-health:master |
| Prerequisite: | ~afreiberger/charm-hw-health:makefile-20.08 |
| Diff against target: | 5526 lines (+1489/-1426), 37 files modified |
| Related bugs: | |

Files modified:

- src/actions/actions.py (+8/-8)
- src/files/common/check_hw_health_cron_output.py (+12/-10)
- src/files/common/hw_health_lib.py (+74/-73)
- src/files/hplog/cron_hplog.py (+81/-74)
- src/files/ilorest/check_ilorest.py (+6/-9)
- src/files/ilorest/cron_ilorest.py (+43/-41)
- src/files/ipmi/check_ipmi.py (+17/-11)
- src/files/ipmi/cron_ipmi_sensors.py (+16/-16)
- src/files/mdadm/check_mdadm.py (+8/-13)
- src/files/mdadm/cron_mdadm.py (+55/-64)
- src/files/megacli/check_megacli.py (+36/-40)
- src/files/nvme/check_nvme.py (+31/-22)
- src/files/sas2ircu/check_sas2ircu.py (+16/-19)
- src/files/sas3ircu/check_sas3ircu.py (+79/-90)
- src/files/ssacli/cron_ssacli.py (+39/-33)
- src/lib/hwhealth/discovery/lshw.py (+87/-87)
- src/lib/hwhealth/discovery/supported_vendors.py (+14/-19)
- src/lib/hwhealth/hwdiscovery.py (+26/-27)
- src/lib/hwhealth/tools.py (+174/-180)
- src/reactive/hw_health.py (+82/-73)
- src/tests/download_nagios_plugin3.py (+7/-6)
- src/tests/functional/conftest.py (+43/-34)
- src/tests/functional/test_hwhealth.py (+222/-192)
- src/tests/unit/lib/samples.py (+1/-6)
- src/tests/unit/test_actions.py (+113/-72)
- src/tests/unit/test_check_mdadm.py (+19/-30)
- src/tests/unit/test_check_megacli.py (+15/-17)
- src/tests/unit/test_check_nvme.py (+8/-8)
- src/tests/unit/test_check_sas2ircu.py (+5/-5)
- src/tests/unit/test_check_sas3ircu.py (+5/-5)
- src/tests/unit/test_cron_hplog.py (+10/-7)
- src/tests/unit/test_cron_ilorest.py (+12/-5)
- src/tests/unit/test_cron_mdadm.py (+65/-70)
- src/tests/unit/test_cron_ssacli.py (+20/-9)
- src/tests/unit/test_hwdiscovery.py (+27/-37)
- src/tests/unit/test_lshw.py (+11/-11)
- src/tox.ini (+2/-3)
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Xav Paice (community) | | | Approve |

Review via email: mp+388951@code.launchpad.net
Commit message
Description of the change
Preview Diff
1 | diff --git a/src/actions/actions.py b/src/actions/actions.py |
2 | index e3f04cd..734841b 100755 |
3 | --- a/src/actions/actions.py |
4 | +++ b/src/actions/actions.py |
5 | @@ -21,7 +21,7 @@ import sys |
6 | from charmhelpers.core.hookenv import action_set, action_get, action_fail, log |
7 | |
8 | |
9 | -IPMI_SEL = '/usr/sbin/ipmi-sel' |
10 | +IPMI_SEL = "/usr/sbin/ipmi-sel" |
11 | |
12 | |
13 | def clear_sel(): |
14 | @@ -30,11 +30,11 @@ def clear_sel(): |
15 | Uses ipmi-sel --post-clear, clears the SEL log and stores the cleared entries |
16 | in action output. |
17 | """ |
18 | - command = [IPMI_SEL, '--post-clear'] |
19 | + command = [IPMI_SEL, "--post-clear"] |
20 | try: |
21 | output = subprocess.check_output(command) |
22 | log("Action clear-sel completed, sel log cleared: {}".format(output)) |
23 | - action_set({'message': output.decode('UTF-8')}) |
24 | + action_set({"message": output.decode("UTF-8")}) |
25 | except subprocess.CalledProcessError as e: |
26 | action_fail("Action failed with {}".format(e)) |
27 | |
28 | @@ -45,23 +45,23 @@ def show_sel(): |
29 | By default, this will show all non-nominal events. If you specify show-all, |
30 | it will show all events. |
31 | """ |
32 | - show_all = action_get('show-all') |
33 | - command = [IPMI_SEL, '--output-event-state'] |
34 | + show_all = action_get("show-all") |
35 | + command = [IPMI_SEL, "--output-event-state"] |
36 | try: |
37 | header, body = None, None |
38 | - output = subprocess.check_output(command).decode('UTF-8') |
39 | + output = subprocess.check_output(command).decode("UTF-8") |
40 | lines = output.splitlines() |
41 | if lines: |
42 | header, body = lines[0], lines[1:] |
43 | if not show_all: |
44 | # This is fairly naive, but it may be good enough for now |
45 | - body = [line for line in body if 'Nominal' not in line] |
46 | + body = [line for line in body if "Nominal" not in line] |
47 | if body: |
48 | final_output = "\n".join([header] + body) |
49 | else: |
50 | final_output = "No matching entries found" |
51 | log("Action show-sel completed:\n{}".format(final_output)) |
52 | - action_set({'message': final_output}) |
53 | + action_set({"message": final_output}) |
54 | except subprocess.CalledProcessError as e: |
55 | action_fail("Action failed with {}".format(e)) |
56 | |
57 | diff --git a/src/files/common/check_hw_health_cron_output.py b/src/files/common/check_hw_health_cron_output.py |
58 | index ebb63e1..92ff372 100755 |
59 | --- a/src/files/common/check_hw_health_cron_output.py |
60 | +++ b/src/files/common/check_hw_health_cron_output.py |
61 | @@ -10,11 +10,13 @@ |
62 | |
63 | from optparse import OptionParser |
64 | |
65 | -from nagios_plugin3 import (check_file_freshness, |
66 | - try_check, |
67 | - WarnError, |
68 | - UnknownError, |
69 | - CriticalError) |
70 | +from nagios_plugin3 import ( |
71 | + check_file_freshness, |
72 | + try_check, |
73 | + WarnError, |
74 | + UnknownError, |
75 | + CriticalError, |
76 | +) |
77 | |
78 | |
79 | ############################################################################### |
80 | @@ -55,17 +57,17 @@ def main(): |
81 | help="freshness time limit [default=%default]", |
82 | metavar="SECONDS", |
83 | default=1200, |
84 | - type=int |
85 | + type=int, |
86 | ) |
87 | parser.add_option( |
88 | - "-f", "--filename", |
89 | + "-f", |
90 | + "--filename", |
91 | dest="input_file", |
92 | - help=('file containing the output of ' |
93 | - 'cron_ilorest.py [default=%default]'), |
94 | + help=("file containing the output of cron_ilorest.py [default=%default]"), |
95 | metavar="FILE", |
96 | nargs=1, |
97 | # default="/var/lib/nagios/UNSETOUTPUTFILE.out", |
98 | - type=str |
99 | + type=str, |
100 | ) |
101 | |
102 | (opts, args) = parser.parse_args() |
103 | diff --git a/src/files/common/hw_health_lib.py b/src/files/common/hw_health_lib.py |
104 | index c75a4e5..945de0d 100644 |
105 | --- a/src/files/common/hw_health_lib.py |
106 | +++ b/src/files/common/hw_health_lib.py |
107 | @@ -60,25 +60,30 @@ def read_ignore_file(ignore_file): # noqa C901 |
108 | ignores = [] |
109 | if os.path.isfile(ignore_file): |
110 | for line in open(ignore_file).readlines(): |
111 | - d = {'matched': False, 'expired': False, 'line': line.rstrip(), 'ignore': None} |
112 | + d = { |
113 | + "matched": False, |
114 | + "expired": False, |
115 | + "line": line.rstrip(), |
116 | + "ignore": None, |
117 | + } |
118 | line = line.strip() |
119 | # special case lines starting with '*', do not disable if unmatched |
120 | persist = False |
121 | - if line.startswith('*'): |
122 | + if line.startswith("*"): |
123 | persist = True |
124 | - line = line.lstrip('*').strip() |
125 | + line = line.lstrip("*").strip() |
126 | # parse date lines |
127 | - if line.startswith('['): |
128 | - parts = re.split('\\[|\\]', line, maxsplit=2) |
129 | + if line.startswith("["): |
130 | + parts = re.split("\\[|\\]", line, maxsplit=2) |
131 | date = parts[1].strip() |
132 | ignore = parts[2].strip() |
133 | try: |
134 | - date = datetime.strptime(date, '%Y-%m-%d %H:%M') |
135 | + date = datetime.strptime(date, "%Y-%m-%d %H:%M") |
136 | except ValueError: |
137 | try: |
138 | - date = datetime.strptime(date, '%Y-%m-%d') |
139 | + date = datetime.strptime(date, "%Y-%m-%d") |
140 | except ValueError: |
141 | - print("Failed to parse ignore date: {}".format(d['line'])) |
142 | + print("Failed to parse ignore date: {}".format(d["line"])) |
143 | date = None |
144 | if date: |
145 | # Do not alert directly at midnight UTC |
146 | @@ -88,18 +93,18 @@ def read_ignore_file(ignore_file): # noqa C901 |
147 | if date: |
148 | if datetime.now().weekday() in (5, 6): |
149 | # Ignore Saturday/Sunday to not annoy on-call |
150 | - d['ignore'] = ignore |
151 | + d["ignore"] = ignore |
152 | elif date > datetime.now(): |
153 | - d['ignore'] = ignore |
154 | + d["ignore"] = ignore |
155 | else: |
156 | - d['expired'] = True |
157 | - if persist and not d['expired']: |
158 | + d["expired"] = True |
159 | + if persist and not d["expired"]: |
160 | # set matched True so will not get disabled on non-match |
161 | - d['matched'] = True |
162 | + d["matched"] = True |
163 | # comment lines and empty lines are just added |
164 | - elif line.startswith('#') or not line: |
165 | + elif line.startswith("#") or not line: |
166 | # add with matched True so does not trigger a file rewrite |
167 | - d['matched'] = True |
168 | + d["matched"] = True |
169 | # unrecognized lines left matched False so rewritten |
170 | ignores.append(d) |
171 | return ignores |
172 | @@ -107,22 +112,24 @@ def read_ignore_file(ignore_file): # noqa C901 |
173 | |
174 | def write_ignore_file(ignores, ignore_file): |
175 | # if any ignores are not matched then write out file lines again |
176 | - if any([not i['matched'] for i in ignores]): |
177 | + if any([not i["matched"] for i in ignores]): |
178 | dirname, basename = os.path.split(ignore_file) |
179 | date = datetime.now() |
180 | try: |
181 | f = tempfile.NamedTemporaryFile(dir=dirname, prefix=basename, delete=False) |
182 | for ignore in ignores: |
183 | - if not ignore['matched'] and ignore['ignore']: |
184 | - ignore['line'] = "# not matched at {} #{}".format( |
185 | - date.strftime("%Y-%m-%dT%H:%M:%S"), ignore['line'] |
186 | + if not ignore["matched"] and ignore["ignore"]: |
187 | + ignore["line"] = "# not matched at {} #{}".format( |
188 | + date.strftime("%Y-%m-%dT%H:%M:%S"), ignore["line"] |
189 | ) |
190 | - elif ignore['expired']: |
191 | + elif ignore["expired"]: |
192 | # this won't get updated unless the alert has cleared |
193 | - ignore['line'] = "# expired #{}".format(ignore['line']) |
194 | - elif not ignore['matched']: |
195 | - ignore['line'] = "# unknown or bad format #{}".format(ignore['line']) |
196 | - f.write(ignore['line'] + '\n') |
197 | + ignore["line"] = "# expired #{}".format(ignore["line"]) |
198 | + elif not ignore["matched"]: |
199 | + ignore["line"] = "# unknown or bad format #{}".format( |
200 | + ignore["line"] |
201 | + ) |
202 | + f.write(ignore["line"] + "\n") |
203 | f.flush() |
204 | os.fsync(f.fileno()) |
205 | f.close() |
206 | @@ -137,11 +144,11 @@ def write_ignore_file(ignores, ignore_file): |
207 | def ignore(line, ignores): |
208 | # check if each ignore is in line |
209 | for ignore in ignores: |
210 | - if ignore['ignore'] and ignore['ignore'] in line: |
211 | + if ignore["ignore"] and ignore["ignore"] in line: |
212 | ignoring_output.append("Ignoring: {}".format(line)) |
213 | # note: ignores can be updated since it is passed by reference |
214 | # matched=True to keep using this ignore (see write_ignore_file) |
215 | - ignore['matched'] = True |
216 | + ignore["matched"] = True |
217 | return True |
218 | return False |
219 | |
220 | @@ -153,102 +160,96 @@ def get_hp_controller_slots(): |
221 | Use the utility to determine the current controller slot(s) available for probing |
222 | """ |
223 | slots = [] |
224 | - cmd = ['/usr/sbin/hpssacli', 'ctrl', 'all', 'show'] |
225 | + cmd = ["/usr/sbin/hpssacli", "ctrl", "all", "show"] |
226 | try: |
227 | - results = subprocess.check_output(cmd).decode('UTF-8') |
228 | + results = subprocess.check_output(cmd).decode("UTF-8") |
229 | except subprocess.CalledProcessError: |
230 | return slots |
231 | for line in results.splitlines(): |
232 | - if 'in Slot' in line: |
233 | + if "in Slot" in line: |
234 | slots.append(line.split()[5]) |
235 | return slots |
236 | |
237 | |
238 | class HWCronArgumentParser(argparse.ArgumentParser): |
239 | - def __init__( |
240 | - self, |
241 | - def_write_file=None, |
242 | - *args, |
243 | - **kwargs |
244 | - ): |
245 | + def __init__(self, def_write_file=None, *args, **kwargs): |
246 | super().__init__( |
247 | - formatter_class=argparse.ArgumentDefaultsHelpFormatter, |
248 | - *args, |
249 | - **kwargs |
250 | + formatter_class=argparse.ArgumentDefaultsHelpFormatter, *args, **kwargs |
251 | ) |
252 | # self.prog is populated by ArgumentParser |
253 | - self._def_write_file = \ |
254 | - def_write_file or '/var/lib/nagios/{}.out'.format(self.prog) |
255 | + self._def_write_file = def_write_file or "/var/lib/nagios/{}.out".format( |
256 | + self.prog |
257 | + ) |
258 | |
259 | def parse_args(self, *args, **kwargs): |
260 | self.add_argument( |
261 | - '-w', '--write', dest='write', type=str, |
262 | + "-w", |
263 | + "--write", |
264 | + dest="write", |
265 | + type=str, |
266 | default=self._def_write_file, |
267 | - help='cache tool output in this file', |
268 | + help="cache tool output in this file", |
269 | ) |
270 | super().parse_args(*args, **kwargs) |
271 | |
272 | |
273 | class HWCheckArgumentParser(argparse.ArgumentParser): |
274 | - def __init__( |
275 | - self, |
276 | - def_input_file=None, |
277 | - *args, |
278 | - **kwargs |
279 | - ): |
280 | + def __init__(self, def_input_file=None, *args, **kwargs): |
281 | super().__init__( |
282 | - formatter_class=argparse.ArgumentDefaultsHelpFormatter, |
283 | - *args, |
284 | - **kwargs |
285 | + formatter_class=argparse.ArgumentDefaultsHelpFormatter, *args, **kwargs |
286 | ) |
287 | # self.prog is populated by ArgumentParser |
288 | - self._def_input_file = \ |
289 | - def_input_file or '/var/lib/nagios/{}.out'.format(self.prog) |
290 | + self._def_input_file = def_input_file or "/var/lib/nagios/{}.out".format( |
291 | + self.prog |
292 | + ) |
293 | |
294 | def parse_args(self, *args, **kwargs): |
295 | self.add_argument( |
296 | - '-i', '--input', dest='input_file', type=str, |
297 | + "-i", |
298 | + "--input", |
299 | + dest="input_file", |
300 | + type=str, |
301 | default=self._def_input_file, |
302 | - help='read cached tool output from this file', |
303 | + help="read cached tool output from this file", |
304 | ) |
305 | super().parse_args(*args, **kwargs) |
306 | |
307 | |
308 | class HPArgumentParser(HWCronArgumentParser): |
309 | - def __init__( |
310 | - self, |
311 | - def_exclude_file=None, |
312 | - *args, |
313 | - **kwargs |
314 | - ): |
315 | + def __init__(self, def_exclude_file=None, *args, **kwargs): |
316 | super().__init__(*args, **kwargs) |
317 | - self._def_exclude_file = \ |
318 | - def_exclude_file or '/etc/nagios/{}.exclude.yaml'.format(self.prog) |
319 | + self._def_exclude_file = ( |
320 | + def_exclude_file or "/etc/nagios/{}.exclude.yaml".format(self.prog) |
321 | + ) |
322 | |
323 | def _expired(self, exclusion): |
324 | - return 'expires' in exclusion and exclusion['expires'] < datetime.now() |
325 | + return "expires" in exclusion and exclusion["expires"] < datetime.now() |
326 | |
327 | def parse_args(self, *args, **kwargs): |
328 | self.add_argument( |
329 | - '--debug', dest='debug', action='store_true', |
330 | - help='Extra debugging', |
331 | + "--debug", dest="debug", action="store_true", help="Extra debugging", |
332 | ) |
333 | |
334 | self.add_argument( |
335 | - '--exclude', dest='exclude', type=str, action='append', |
336 | - help='Errors to ignore (multiple)', |
337 | + "--exclude", |
338 | + dest="exclude", |
339 | + type=str, |
340 | + action="append", |
341 | + help="Errors to ignore (multiple)", |
342 | ) |
343 | |
344 | self.add_argument( |
345 | - '--exclude-file', dest='exclude_file', type=str, |
346 | + "--exclude-file", |
347 | + dest="exclude_file", |
348 | + type=str, |
349 | default=self._def_exclude_file, |
350 | - help='YAML file with errors to ignore', |
351 | + help="YAML file with errors to ignore", |
352 | ) |
353 | |
354 | # Ensure we initialize a namespace if needed, |
355 | # and have a reference to it |
356 | - namespace = kwargs.get('namespace') or argparse.Namespace() |
357 | - kwargs['namespace'] = namespace |
358 | + namespace = kwargs.get("namespace") or argparse.Namespace() |
359 | + kwargs["namespace"] = namespace |
360 | # now parse args and put them in the namespace |
361 | super().parse_args(*args, **kwargs) |
362 | |
363 | @@ -258,6 +259,6 @@ class HPArgumentParser(HWCronArgumentParser): |
364 | with open(namespace.exclude_file) as f: |
365 | for i in yaml.safe_load(f): |
366 | if not self._expired(i): |
367 | - namespace.exclude.append(i['error']) |
368 | + namespace.exclude.append(i["error"]) |
369 | |
370 | return namespace |
371 | diff --git a/src/files/hplog/cron_hplog.py b/src/files/hplog/cron_hplog.py |
372 | index d2b21cf..57b52e2 100755 |
373 | --- a/src/files/hplog/cron_hplog.py |
374 | +++ b/src/files/hplog/cron_hplog.py |
375 | @@ -25,7 +25,7 @@ try: |
376 | except ImportError: |
377 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
378 | common_libs_dir = os.path.abspath( |
379 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
380 | + os.path.join(os.path.dirname(__file__), "..", "common") |
381 | ) |
382 | if common_libs_dir not in sys.path: |
383 | sys.path.append(common_libs_dir) |
384 | @@ -37,35 +37,43 @@ except ImportError: |
385 | ) |
386 | |
387 | FLAG_PROCESSOR = { |
388 | - 't': 'parse_temperature', |
389 | - 'f': 'parse_fans', |
390 | - 'p': 'parse_power', |
391 | - 'v': 'save_log', |
392 | + "t": "parse_temperature", |
393 | + "f": "parse_fans", |
394 | + "p": "parse_power", |
395 | + "v": "save_log", |
396 | } |
397 | -OUTPUT_FILE = '/var/lib/nagios/hplog.out' |
398 | -EXCLUDE_FILE = '/etc/nagios/hplog.exclude.yaml' |
399 | -DEBUG_FILE = '/var/lib/nagios/hplog.debug' |
400 | +OUTPUT_FILE = "/var/lib/nagios/hplog.out" |
401 | +EXCLUDE_FILE = "/etc/nagios/hplog.exclude.yaml" |
402 | +DEBUG_FILE = "/var/lib/nagios/hplog.debug" |
403 | |
404 | |
405 | def parse_args(argv=None): |
406 | parser = HPArgumentParser( |
407 | - prog='cron_hplog', |
408 | - def_write_file=OUTPUT_FILE, |
409 | - def_exclude_file=EXCLUDE_FILE |
410 | + prog="cron_hplog", def_write_file=OUTPUT_FILE, def_exclude_file=EXCLUDE_FILE |
411 | ) |
412 | parser.add_argument( |
413 | - '-f', '--hplog_flags', dest='hplog_flags', type=str, |
414 | - default=','.join(FLAG_PROCESSOR.keys()), |
415 | - help='Flags to call hplog with', |
416 | + "-f", |
417 | + "--hplog_flags", |
418 | + dest="hplog_flags", |
419 | + type=str, |
420 | + default=",".join(FLAG_PROCESSOR.keys()), |
421 | + help="Flags to call hplog with", |
422 | ) |
423 | parser.add_argument( |
424 | - '-l', '--log_path', dest='log_path', type=str, |
425 | + "-l", |
426 | + "--log_path", |
427 | + dest="log_path", |
428 | + type=str, |
429 | default=DEBUG_FILE, |
430 | - help='Where to write hplog -v output for troubleshooting', |
431 | + help="Where to write hplog -v output for troubleshooting", |
432 | ) |
433 | parser.add_argument( |
434 | - '-s', '--single_psu', dest='single_psu', action='store_true', |
435 | - help='Do not alert on lack of PSU redundancy', default=False, |
436 | + "-s", |
437 | + "--single_psu", |
438 | + dest="single_psu", |
439 | + action="store_true", |
440 | + help="Do not alert on lack of PSU redundancy", |
441 | + default=False, |
442 | ) |
443 | return parser.parse_args(args=argv) |
444 | |
445 | @@ -77,15 +85,16 @@ def call_hplog(flag): |
446 | The output of this cron script will be checked by |
447 | nagios via check_hplog.py. The |
448 | """ |
449 | - env = 'export PATH=$PATH:/usr/sbin:/sbin' |
450 | + env = "export PATH=$PATH:/usr/sbin:/sbin" |
451 | |
452 | - cmd = 'hplog -{}'.format(flag) |
453 | + cmd = "hplog -{}".format(flag) |
454 | try: |
455 | - cmdline = '{}; {}'.format(env, cmd) |
456 | - output = subprocess.check_output(cmdline, shell=True).decode('UTF-8') |
457 | + cmdline = "{}; {}".format(env, cmd) |
458 | + output = subprocess.check_output(cmdline, shell=True).decode("UTF-8") |
459 | except subprocess.CalledProcessError as e: |
460 | - return ('Failed running command "{}" Return Code {}: {}' |
461 | - ''.format(cmd, e.returncode, e.output)) |
462 | + return 'Failed running command "{}" Return Code {}: {}' "".format( |
463 | + cmd, e.returncode, e.output |
464 | + ) |
465 | |
466 | funcname = FLAG_PROCESSOR[flag] |
467 | return globals()[funcname](output) |
468 | @@ -106,10 +115,12 @@ def parse_temperature(result): |
469 | 7 Basic Sensor CPU (2) Normal ---F/---C 260F/127C |
470 | """ |
471 | input_file = result.splitlines() |
472 | - if os.path.isfile('/etc/nagios/skip-cat-hp-temperature.txt'): |
473 | + if os.path.isfile("/etc/nagios/skip-cat-hp-temperature.txt"): |
474 | return |
475 | header_line = input_file.pop(0).strip() |
476 | - if header_line != "ID TYPE LOCATION STATUS CURRENT THRESHOLD": # noqa E501 |
477 | + if ( |
478 | + header_line != "ID TYPE LOCATION STATUS CURRENT THRESHOLD" |
479 | + ): # noqa E501 |
480 | return "UNKNOWN Unrecognised header line in 'hplog -t' output" |
481 | for line in input_file: |
482 | line = line.rstrip() |
483 | @@ -123,11 +134,11 @@ def parse_temperature(result): |
484 | temp_current = line[42:51].split("/")[1].strip() |
485 | temp_threshold = line[52:].split("/")[1].strip() |
486 | if temp_status not in ["Normal", "Nominal", "Absent"]: |
487 | - return ( |
488 | - "%s: temperature is '%s' (%s / %s)" % (temp_location, |
489 | - temp_status, |
490 | - temp_current, |
491 | - temp_threshold) |
492 | + return "%s: temperature is '%s' (%s / %s)" % ( |
493 | + temp_location, |
494 | + temp_status, |
495 | + temp_current, |
496 | + temp_threshold, |
497 | ) |
498 | |
499 | return |
500 | @@ -147,13 +158,15 @@ def parse_fans(result): |
501 | 6 Var. Speed Processor Zone Normal Yes Low ( 36) |
502 | """ |
503 | input_file = result.splitlines() |
504 | - if os.path.isfile('/etc/nagios/skip-cat-hp-fans.txt'): |
505 | + if os.path.isfile("/etc/nagios/skip-cat-hp-fans.txt"): |
506 | return |
507 | header_line = input_file.pop(0).strip() |
508 | - if header_line != "ID TYPE LOCATION STATUS REDUNDANT FAN SPEED": # noqa E501 |
509 | + if ( |
510 | + header_line != "ID TYPE LOCATION STATUS REDUNDANT FAN SPEED" |
511 | + ): # noqa E501 |
512 | return "UNKNOWN Unrecognised header line in 'hplog -f' output" |
513 | |
514 | - ignore_file = '/etc/nagios/ignores/ignores-cat-hp-fans.txt' |
515 | + ignore_file = "/etc/nagios/ignores/ignores-cat-hp-fans.txt" |
516 | ignores = read_ignore_file(ignore_file) |
517 | for line in input_file: |
518 | line = line.rstrip() |
519 | @@ -168,12 +181,7 @@ def parse_fans(result): |
520 | fan_speed = line[51:].strip() |
521 | |
522 | (return_now, msg) = process_fan_line( |
523 | - fan_type, |
524 | - fan_location, |
525 | - fan_status, |
526 | - fan_speed, |
527 | - fan_redundant, |
528 | - ignores |
529 | + fan_type, fan_location, fan_status, fan_speed, fan_redundant, ignores |
530 | ) |
531 | if return_now: |
532 | return msg |
533 | @@ -182,36 +190,37 @@ def parse_fans(result): |
534 | |
535 | |
536 | def process_fan_line( |
537 | - fan_type, |
538 | - fan_location, |
539 | - fan_status, |
540 | - fan_speed, |
541 | - fan_redundant, |
542 | - ignores |
543 | + fan_type, fan_location, fan_status, fan_speed, fan_redundant, ignores |
544 | ): |
545 | if fan_type == "Basic Fan": |
546 | - return(False, None) |
547 | + return (False, None) |
548 | |
549 | if fan_type not in ["Var. Speed", "Pwr. Supply", "Auto. Speed"]: |
550 | - return(True, "UNKNOWN %s: Unrecognised fan type '%s'" % (fan_location, |
551 | - fan_type)) |
552 | + return ( |
553 | + True, |
554 | + "UNKNOWN %s: Unrecognised fan type '%s'" % (fan_location, fan_type), |
555 | + ) |
556 | |
557 | if fan_status not in ["Normal", "Nominal"]: |
558 | - err = "%s: fans are '%s' (%s / Redundant: %s)" % (fan_location, |
559 | - fan_status, |
560 | - fan_speed, |
561 | - fan_redundant) |
562 | + err = "%s: fans are '%s' (%s / Redundant: %s)" % ( |
563 | + fan_location, |
564 | + fan_status, |
565 | + fan_speed, |
566 | + fan_redundant, |
567 | + ) |
568 | if not ignore(err, ignores): |
569 | - return(True, err) |
570 | + return (True, err) |
571 | |
572 | if fan_redundant not in ["Yes", "N/A"] and fan_type == "Var. Speed": |
573 | - err = "%s: fans are not redundant (%s / Status: %s)" % (fan_location, |
574 | - fan_speed, |
575 | - fan_redundant) |
576 | + err = "%s: fans are not redundant (%s / Status: %s)" % ( |
577 | + fan_location, |
578 | + fan_speed, |
579 | + fan_redundant, |
580 | + ) |
581 | if not ignore(err, ignores): |
582 | - return(True, err) |
583 | + return (True, err) |
584 | |
585 | - return(False, None) |
586 | + return (False, None) |
587 | |
588 | |
589 | def parse_power(result): |
590 | @@ -224,13 +233,13 @@ def parse_power(result): |
591 | 2 Standard Pwr. Supply Bay Normal Yes |
592 | """ |
593 | input_file = result.splitlines() |
594 | - if os.path.isfile('/etc/nagios/skip-cat-hp-power.txt'): |
595 | + if os.path.isfile("/etc/nagios/skip-cat-hp-power.txt"): |
596 | return |
597 | header_line = input_file.pop(0).strip() |
598 | if header_line != "ID TYPE LOCATION STATUS REDUNDANT": |
599 | return "UNKNOWN Unrecognised header line in 'hplog -p' output" |
600 | |
601 | - ignore_file = '/etc/nagios/ignores/ignores-cat-hp-power.txt' |
602 | + ignore_file = "/etc/nagios/ignores/ignores-cat-hp-power.txt" |
603 | ignores = read_ignore_file(ignore_file) |
604 | |
605 | for line in input_file: |
606 | @@ -244,10 +253,9 @@ def parse_power(result): |
607 | power_status = line[33:40].strip() |
608 | # power_redundant = line[41:50].strip() |
609 | if power_type != "Standard": |
610 | - err = "%s: Unrecognised power type '%s'" % (power_location, |
611 | - power_type) |
612 | + err = "%s: Unrecognised power type '%s'" % (power_location, power_type) |
613 | if not ignore(err, ignores): |
614 | - return 'UNKNOWN {}'.format(err) |
615 | + return "UNKNOWN {}".format(err) |
616 | if not ARGS.single_psu and power_status not in ["Normal", "Nominal"]: |
617 | err = "%s: power supply is '%s'" % (power_location, power_status) |
618 | if not ignore(err, ignores): |
619 | @@ -259,7 +267,7 @@ def save_log(result): |
620 | """ |
621 | Save full hplog -v output for troubleshooting after alert |
622 | """ |
623 | - with open(ARGS.log_path, 'w') as f: |
624 | + with open(ARGS.log_path, "w") as f: |
625 | f.write(result) |
626 | return |
627 | |
628 | @@ -270,35 +278,34 @@ def main(): |
629 | |
630 | try: |
631 | # This matches hpasmlited on latest packages for bionic on <= gen9 |
632 | - subprocess.check_call('ps -ef | grep -q hp[a]sm', shell=True) |
633 | + subprocess.check_call("ps -ef | grep -q hp[a]sm", shell=True) |
634 | except subprocess.CalledProcessError as e: |
635 | msg = ( |
636 | - 'UNKNOWN hp[a]sm daemon not found running, cannot run hplog: ' |
637 | - '{}'.format(e.output) |
638 | + "UNKNOWN hp[a]sm daemon not found running, cannot run hplog: " |
639 | + "{}".format(e.output) |
640 | ) |
641 | exit = 3 |
642 | else: |
643 | errors = [] |
644 | - for flag in ARGS.hplog_flags.split(','): |
645 | + for flag in ARGS.hplog_flags.split(","): |
646 | log_output = call_hplog(flag) |
647 | if log_output: |
648 | errors.append(log_output) |
649 | |
650 | if len(errors) > 0: |
651 | - msg = ('CRIT {} error(s): {}' |
652 | - ''.format(len(errors), ' - '.join(errors))) |
653 | + msg = "CRIT {} error(s): {}".format(len(errors), " - ".join(errors)) |
654 | exit = 2 |
655 | else: |
656 | - msg = 'OK No errors found' |
657 | + msg = "OK No errors found" |
658 | exit = 0 |
659 | |
660 | if ARGS.write: |
661 | - with open(ARGS.write, 'w') as f: |
662 | + with open(ARGS.write, "w") as f: |
663 | f.write(msg) |
664 | else: |
665 | print(msg) |
666 | sys.exit(exit) |
667 | |
668 | |
669 | -if __name__ == '__main__': |
670 | +if __name__ == "__main__": |
671 | main() |
672 | diff --git a/src/files/ilorest/check_ilorest.py b/src/files/ilorest/check_ilorest.py |
673 | index 430c632..8b477c8 100755 |
674 | --- a/src/files/ilorest/check_ilorest.py |
675 | +++ b/src/files/ilorest/check_ilorest.py |
676 | @@ -10,10 +10,7 @@ |
677 | |
678 | from optparse import OptionParser |
679 | |
680 | -from nagios_plugin3 import (check_file_freshness, |
681 | - try_check, |
682 | - WarnError, |
683 | - CriticalError) |
684 | +from nagios_plugin3 import check_file_freshness, try_check, WarnError, CriticalError |
685 | |
686 | |
687 | ############################################################################### |
688 | @@ -52,16 +49,16 @@ def main(): |
689 | help="freshness time limit [default=%default]", |
690 | metavar="SECONDS", |
691 | default=1200, |
692 | - type=int |
693 | + type=int, |
694 | ) |
695 | parser.add_option( |
696 | - "-f", "--filename", |
697 | + "-f", |
698 | + "--filename", |
699 | dest="input_file", |
700 | - help=('file containing the output of ' |
701 | - 'cron_ilorest.py [default=%default]'), |
702 | + help=("file containing the output of cron_ilorest.py [default=%default]"), |
703 | metavar="FILE", |
704 | default="/var/lib/nagios/ilorest.nagios", |
705 | - type=str |
706 | + type=str, |
707 | ) |
708 | |
709 | (opts, args) = parser.parse_args() |
710 | diff --git a/src/files/ilorest/cron_ilorest.py b/src/files/ilorest/cron_ilorest.py |
711 | index 739438d..561d0aa 100755 |
712 | --- a/src/files/ilorest/cron_ilorest.py |
713 | +++ b/src/files/ilorest/cron_ilorest.py |
714 | @@ -18,7 +18,7 @@ try: |
715 | except ImportError: |
716 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
717 | common_libs_dir = os.path.abspath( |
718 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
719 | + os.path.join(os.path.dirname(__file__), "..", "common") |
720 | ) |
721 | if common_libs_dir not in sys.path: |
722 | sys.path.append(common_libs_dir) |
723 | @@ -26,16 +26,16 @@ except ImportError: |
724 | |
725 | |
726 | DEFAULT_SELECTORS = [ |
727 | - 'Chassis', |
728 | - 'HpeSmartStorage', |
729 | - 'Memory', |
730 | - 'Power', |
731 | - 'Processor', |
732 | - 'Thermal', |
733 | + "Chassis", |
734 | + "HpeSmartStorage", |
735 | + "Memory", |
736 | + "Power", |
737 | + "Processor", |
738 | + "Thermal", |
739 | ] |
740 | |
741 | -EXCLUDE_FILE = '/etc/nagios/cron_ilorest.exclude.yaml' |
742 | -OUTPUT_FILE = '/var/lib/nagios/ilorest.nagios' |
743 | +EXCLUDE_FILE = "/etc/nagios/cron_ilorest.exclude.yaml" |
744 | +OUTPUT_FILE = "/var/lib/nagios/ilorest.nagios" |
745 | |
746 | |
747 | class CronILOrest: |
748 | @@ -44,62 +44,64 @@ class CronILOrest: |
749 | |
750 | def parse_args(self, argv=None): |
751 | parser = HPArgumentParser( |
752 | - prog='cron_ilorest', |
753 | - description=('Convert the output of ilorest into an appropriate ' |
754 | - 'Nagios status line'), |
755 | + prog="cron_ilorest", |
756 | + description=( |
757 | + "Convert the output of ilorest into an appropriate " |
758 | + "Nagios status line" |
759 | + ), |
760 | def_write_file=OUTPUT_FILE, |
761 | - def_exclude_file=EXCLUDE_FILE |
762 | + def_exclude_file=EXCLUDE_FILE, |
763 | ) |
764 | |
765 | parser.add_argument( |
766 | - '--selectors', dest='selectors', type=str, nargs='+', |
767 | + "--selectors", |
768 | + dest="selectors", |
769 | + type=str, |
770 | + nargs="+", |
771 | default=DEFAULT_SELECTORS, |
772 | - help='iLO selectors to run', |
773 | + help="iLO selectors to run", |
774 | ) |
775 | |
776 | return parser.parse_args(args=argv) |
777 | |
778 | def check_selector(self, selector): |
779 | if self.args.debug: |
780 | - print('Checking selector {}'.format(selector), file=sys.stderr) |
781 | + print("Checking selector {}".format(selector), file=sys.stderr) |
782 | ilorest_output = self._get_json_ilorest_output(selector) |
783 | |
784 | errors = [] |
785 | jsonidx = -1 |
786 | # Disregard the first chunk of data, it's banner/debug/etc |
787 | - for jsondata in ilorest_output.split('\n{\n')[1:]: |
788 | + for jsondata in ilorest_output.split("\n{\n")[1:]: |
789 | # The output will be one or more JSON defs |
790 | jsonidx += 1 |
791 | - j = json.loads('{' + jsondata) |
792 | + j = json.loads("{" + jsondata) |
793 | errors += self._walk_selector(j, [selector, str(jsonidx)]) |
794 | return errors |
795 | |
796 | def _get_json_ilorest_output(self, selector): |
797 | - cmd = ['ilorest', 'list', '-j', '--selector={}'.format(selector)] |
798 | - return check_output(cmd).decode('UTF-8') |
799 | + cmd = ["ilorest", "list", "-j", "--selector={}".format(selector)] |
800 | + return check_output(cmd).decode("UTF-8") |
801 | |
802 | def _get_health_status_message(self, j, crumb_trail=[]): |
803 | - desc = j['Name'] |
804 | - if 'SerialNumber' in j: |
805 | - desc += ' ({})'.format(j['SerialNumber']) |
806 | - state = j.get('Status', 'null').get('State', 'unknown') |
807 | - health = j.get('Status', 'null').get('Health', 'unknown') |
808 | - msg = '{} ({}): {} health {}'.format(' '.join(crumb_trail), |
809 | - desc, |
810 | - state, |
811 | - health) |
812 | + desc = j["Name"] |
813 | + if "SerialNumber" in j: |
814 | + desc += " ({})".format(j["SerialNumber"]) |
815 | + state = j.get("Status", "null").get("State", "unknown") |
816 | + health = j.get("Status", "null").get("Health", "unknown") |
817 | + msg = "{} ({}): {} health {}".format(" ".join(crumb_trail), desc, state, health) |
818 | if self.args.debug: |
819 | print(msg, file=sys.stderr) |
820 | |
821 | if msg in self.args.exclude and self.args.debug: |
822 | - print('Ignoring excluded error: {}'.format(msg), file=sys.stderr) |
823 | + print("Ignoring excluded error: {}".format(msg), file=sys.stderr) |
824 | return [] |
825 | else: |
826 | return [msg] |
827 | |
828 | def _walk_selector(self, j, crumb_trail=[]): |
829 | errors = [] |
830 | - if j.get('Status') and j.get('Status').get('Health') != 'OK': |
831 | + if j.get("Status") and j.get("Status").get("Health") != "OK": |
832 | errors.extend(self._get_health_status_message(j, crumb_trail)) |
833 | |
834 | for keyname in j.keys(): |
835 | @@ -110,31 +112,31 @@ class CronILOrest: |
836 | for i in range(len(j[keyname])): |
837 | if type(j[keyname][i]) != dict: |
838 | continue |
839 | - if 'Status' not in j[keyname][i]: |
840 | + if "Status" not in j[keyname][i]: |
841 | continue |
842 | - self._walk_selector(j[keyname][i], |
843 | - (crumb_trail + [keyname, str(i)])) |
844 | + self._walk_selector(j[keyname][i], (crumb_trail + [keyname, str(i)])) |
845 | return errors |
846 | |
847 | |
848 | def main(argv=None): |
849 | cronilorest = CronILOrest(argv) |
850 | |
851 | - errors = [cronilorest.check_selector(selector) |
852 | - for selector in cronilorest.args.selectors] |
853 | + errors = [ |
854 | + cronilorest.check_selector(selector) for selector in cronilorest.args.selectors |
855 | + ] |
856 | |
857 | if len(errors) > 0: |
858 | - msg = 'CRIT {} error(s): {}'.format(len(errors), ' - '.join(errors)) |
859 | + msg = "CRIT {} error(s): {}".format(len(errors), " - ".join(errors)) |
860 | exit = 2 |
861 | else: |
862 | - msg = 'OK No errors found' |
863 | + msg = "OK No errors found" |
864 | exit = 0 |
865 | |
866 | if cronilorest.args.write: |
867 | - if cronilorest.args.write == '-': |
868 | + if cronilorest.args.write == "-": |
869 | print(msg) |
870 | else: |
871 | - with open(cronilorest.args.write, 'w') as f: |
872 | + with open(cronilorest.args.write, "w") as f: |
873 | f.write(msg) |
874 | else: |
875 | # This should never happen since 'write' has a default value |
876 | @@ -142,5 +144,5 @@ def main(argv=None): |
877 | sys.exit(exit) |
878 | |
879 | |
880 | -if __name__ == '__main__': |
881 | +if __name__ == "__main__": |
882 | main(sys.argv[1:]) |
883 | diff --git a/src/files/ipmi/check_ipmi.py b/src/files/ipmi/check_ipmi.py |
884 | index fafd774..1fbe34c 100644 |
885 | --- a/src/files/ipmi/check_ipmi.py |
886 | +++ b/src/files/ipmi/check_ipmi.py |
887 | @@ -3,36 +3,42 @@ |
888 | |
889 | import os |
890 | |
891 | -from nagios_plugin3 import CriticalError, UnknownError, WarnError, check_file_freshness, try_check |
892 | - |
893 | -OUTPUT_FILE = '/var/lib/nagios/ipmi_sensors.out' |
894 | +from nagios_plugin3 import ( |
895 | + CriticalError, |
896 | + UnknownError, |
897 | + WarnError, |
898 | + check_file_freshness, |
899 | + try_check, |
900 | +) |
901 | + |
902 | +OUTPUT_FILE = "/var/lib/nagios/ipmi_sensors.out" |
903 | NAGIOS_ERRORS = { |
904 | - 'CRITICAL': CriticalError, |
905 | - 'UNKNOWN': UnknownError, |
906 | - 'WARNING': WarnError, |
907 | + "CRITICAL": CriticalError, |
908 | + "UNKNOWN": UnknownError, |
909 | + "WARNING": WarnError, |
910 | } |
911 | |
912 | |
913 | def parse_output(): |
914 | if not os.path.exists(OUTPUT_FILE): |
915 | - raise UnknownError('UNKNOWN: {} does not exist (yet?)'.format(OUTPUT_FILE)) |
916 | + raise UnknownError("UNKNOWN: {} does not exist (yet?)".format(OUTPUT_FILE)) |
917 | |
918 | # Check if file is newer than 10min |
919 | try_check(check_file_freshness, OUTPUT_FILE) |
920 | |
921 | try: |
922 | - with open(OUTPUT_FILE, 'r') as fd: |
923 | + with open(OUTPUT_FILE, "r") as fd: |
924 | output = fd.read() |
925 | except PermissionError as error: |
926 | raise UnknownError(error) |
927 | |
928 | for startline in NAGIOS_ERRORS: |
929 | - if output.startswith('{}: '.format(startline)): |
930 | + if output.startswith("{}: ".format(startline)): |
931 | func = NAGIOS_ERRORS[startline] |
932 | raise func(output) |
933 | |
934 | - print('OK: {}'.format(output)) |
935 | + print("OK: {}".format(output)) |
936 | |
937 | |
938 | -if __name__ == '__main__': |
939 | +if __name__ == "__main__": |
940 | try_check(parse_output) |
941 | diff --git a/src/files/ipmi/cron_ipmi_sensors.py b/src/files/ipmi/cron_ipmi_sensors.py |
942 | index 2b6cdd5..aa3430d 100644 |
943 | --- a/src/files/ipmi/cron_ipmi_sensors.py |
944 | +++ b/src/files/ipmi/cron_ipmi_sensors.py |
945 | @@ -4,20 +4,20 @@ import os |
946 | import subprocess |
947 | import sys |
948 | |
949 | -CHECK_IPMI_PID = '/var/run/nagios/check_ipmi_sensors.pid' |
950 | -OUTPUT_FILE = '/var/lib/nagios/ipmi_sensors.out' |
951 | -TMP_OUTPUT_FILE = OUTPUT_FILE + '.tmp' |
952 | -CMD = '/usr/local/lib/nagios/plugins/check_ipmi_sensor' |
953 | +CHECK_IPMI_PID = "/var/run/nagios/check_ipmi_sensors.pid" |
954 | +OUTPUT_FILE = "/var/lib/nagios/ipmi_sensors.out" |
955 | +TMP_OUTPUT_FILE = OUTPUT_FILE + ".tmp" |
956 | +CMD = "/usr/local/lib/nagios/plugins/check_ipmi_sensor" |
957 | NAGIOS_ERRORS = { |
958 | - 1: 'WARNING', |
959 | - 2: 'CRITICAL', |
960 | - 3: 'UNKNOWN', |
961 | + 1: "WARNING", |
962 | + 2: "CRITICAL", |
963 | + 3: "UNKNOWN", |
964 | } |
965 | |
966 | |
967 | def write_output_file(output): |
968 | try: |
969 | - with open(TMP_OUTPUT_FILE, 'w') as fd: |
970 | + with open(TMP_OUTPUT_FILE, "w") as fd: |
971 | fd.write(output) |
972 | except IOError as e: |
973 | print("Cannot write output file {}, error {}".format(TMP_OUTPUT_FILE, e)) |
974 | @@ -29,16 +29,16 @@ def gather_metrics(): |
975 | # Check if a PID file exists |
976 | if os.path.exists(CHECK_IPMI_PID): |
977 | # is the PID valid? |
978 | - with open(CHECK_IPMI_PID, 'r') as fd: |
979 | + with open(CHECK_IPMI_PID, "r") as fd: |
980 | PID = fd.read() |
981 | - if PID not in os.listdir('/proc'): |
982 | + if PID not in os.listdir("/proc"): |
983 | # PID file is invalid, remove it |
984 | os.remove(CHECK_IPMI_PID) |
985 | else: |
986 | return |
987 | |
988 | try: |
989 | - with open(CHECK_IPMI_PID, 'w') as fd: |
990 | + with open(CHECK_IPMI_PID, "w") as fd: |
991 | fd.write(str(os.getpid())) |
992 | except IOError as e: |
993 | # unable to write PID file, can't lock |
994 | @@ -49,17 +49,17 @@ def gather_metrics(): |
995 | if len(sys.argv) > 1: |
996 | cmdline.extend(sys.argv[1:]) |
997 | try: |
998 | - output = subprocess.check_output(cmdline).decode('utf8') |
999 | + output = subprocess.check_output(cmdline).decode("utf8") |
1000 | write_output_file(output) |
1001 | except subprocess.CalledProcessError as error: |
1002 | - output = error.stdout.decode(errors='ignore') |
1003 | - write_output_file('{}: {}'.format(NAGIOS_ERRORS[error.returncode], output)) |
1004 | + output = error.stdout.decode(errors="ignore") |
1005 | + write_output_file("{}: {}".format(NAGIOS_ERRORS[error.returncode], output)) |
1006 | except PermissionError as error: |
1007 | - write_output_file('UNKNOWN: {}'.format(error)) |
1008 | + write_output_file("UNKNOWN: {}".format(error)) |
1009 | |
1010 | # remove pid reference |
1011 | os.remove(CHECK_IPMI_PID) |
1012 | |
1013 | |
1014 | -if __name__ == '__main__': |
1015 | +if __name__ == "__main__": |
1016 | gather_metrics() |
1017 | diff --git a/src/files/mdadm/check_mdadm.py b/src/files/mdadm/check_mdadm.py |
1018 | index 29a82c6..c602877 100755 |
1019 | --- a/src/files/mdadm/check_mdadm.py |
1020 | +++ b/src/files/mdadm/check_mdadm.py |
1021 | @@ -12,38 +12,33 @@ try: |
1022 | except ImportError: |
1023 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
1024 | common_libs_dir = os.path.abspath( |
1025 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
1026 | + os.path.join(os.path.dirname(__file__), "..", "common") |
1027 | ) |
1028 | if common_libs_dir not in sys.path: |
1029 | sys.path.append(common_libs_dir) |
1030 | from hw_health_lib import HWCheckArgumentParser |
1031 | |
1032 | -INPUT_FILE = '/var/lib/nagios/mdadm.out' |
1033 | +INPUT_FILE = "/var/lib/nagios/mdadm.out" |
1034 | ARGS = argparse.Namespace() |
1035 | |
1036 | |
1037 | def parse_output(): |
1038 | if not os.path.exists(ARGS.input_file): |
1039 | - raise UnknownError( |
1040 | - 'UNKNOWN: file not found ({})'.format(ARGS.input_file) |
1041 | - ) |
1042 | + raise UnknownError("UNKNOWN: file not found ({})".format(ARGS.input_file)) |
1043 | |
1044 | - with open(ARGS.input_file, 'r') as fd: |
1045 | + with open(ARGS.input_file, "r") as fd: |
1046 | for line in fd.readlines(): |
1047 | line = line.strip() |
1048 | - if line.startswith('CRITICAL: '): |
1049 | + if line.startswith("CRITICAL: "): |
1050 | raise CriticalError(line) |
1051 | - elif line.startswith('WARNING: '): |
1052 | + elif line.startswith("WARNING: "): |
1053 | raise WarnError(line) |
1054 | else: |
1055 | print(line) |
1056 | |
1057 | |
1058 | def parse_args(argv=None): |
1059 | - parser = HWCheckArgumentParser( |
1060 | - prog='check_mdadm', |
1061 | - def_input_file=INPUT_FILE, |
1062 | - ) |
1063 | + parser = HWCheckArgumentParser(prog="check_mdadm", def_input_file=INPUT_FILE) |
1064 | return parser.parse_args(args=argv, namespace=ARGS) |
1065 | |
1066 | |
1067 | @@ -52,5 +47,5 @@ def main(argv): |
1068 | try_check(parse_output) |
1069 | |
1070 | |
1071 | -if __name__ == '__main__': |
1072 | +if __name__ == "__main__": |
1073 | main(sys.argv[1:]) |
1074 | diff --git a/src/files/mdadm/cron_mdadm.py b/src/files/mdadm/cron_mdadm.py |
1075 | index 3e7e8e9..fed9ea8 100755 |
1076 | --- a/src/files/mdadm/cron_mdadm.py |
1077 | +++ b/src/files/mdadm/cron_mdadm.py |
1078 | @@ -13,7 +13,7 @@ try: |
1079 | except ImportError: |
1080 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
1081 | common_libs_dir = os.path.abspath( |
1082 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
1083 | + os.path.join(os.path.dirname(__file__), "..", "common") |
1084 | ) |
1085 | if common_libs_dir not in sys.path: |
1086 | sys.path.append(common_libs_dir) |
1087 | @@ -25,22 +25,20 @@ ARGS = argparse.Namespace() |
1088 | |
1089 | |
1090 | def get_devices(): |
1091 | - if os.path.exists('/sbin/mdadm'): |
1092 | + if os.path.exists("/sbin/mdadm"): |
1093 | try: |
1094 | cmd = ["/sbin/mdadm", "--detail", "--scan"] |
1095 | devices_raw = subprocess.check_output(cmd) |
1096 | devices_re = re.compile(r"^ARRAY\s+([^ ]+) ") |
1097 | devices = set() |
1098 | - for line in devices_raw.decode().split('\n'): |
1099 | + for line in devices_raw.decode().split("\n"): |
1100 | line = line.strip() |
1101 | device_re = devices_re.match(line) |
1102 | if device_re is not None: |
1103 | devices.add(device_re.group(1)) |
1104 | return devices |
1105 | except subprocess.CalledProcessError as error: |
1106 | - rc = generate_output( |
1107 | - "CRITICAL: get_devices error - {}".format(error) |
1108 | - ) |
1109 | + rc = generate_output("CRITICAL: get_devices error - {}".format(error)) |
1110 | if rc: |
1111 | sys.exit(0) |
1112 | return set() |
1113 | @@ -48,28 +46,28 @@ def get_devices(): |
1114 | |
1115 | def generate_output(msg): |
1116 | try: |
1117 | - with open(TEMP_FILE, 'w') as fd: |
1118 | + with open(TEMP_FILE, "w") as fd: |
1119 | fd.write(msg) |
1120 | shutil.move(TEMP_FILE, ARGS.write) |
1121 | return True |
1122 | except Exception as error: |
1123 | - print('Unable to generate output file:', error) |
1124 | + print("Unable to generate output file:", error) |
1125 | return False |
1126 | |
1127 | |
1128 | def get_devices_stats(devices): |
1129 | - mdadm_detail = ['/sbin/mdadm', '--detail'] |
1130 | + mdadm_detail = ["/sbin/mdadm", "--detail"] |
1131 | mdadm_detail.extend(sorted(devices)) |
1132 | |
1133 | devices_details_raw = subprocess.check_output(mdadm_detail) |
1134 | |
1135 | - devices_re = r'^(/\S+):$' |
1136 | - state_re = r'^\s*State\s+:\s+(.+)\s*$' |
1137 | - status_re = r'^\s*(Active|Working|Failed|Spare) Devices\s+:\s+(\d+)$' |
1138 | - rebuild_status_re = r'^\s*Rebuild Status\s+:\s+(\d+%\s+\S+)$' |
1139 | - removed_re = r'^\s*-\s+0\s+0\s+(\d+)\s+removed$' |
1140 | + devices_re = r"^(/\S+):$" |
1141 | + state_re = r"^\s*State\s+:\s+(.+)\s*$" |
1142 | + status_re = r"^\s*(Active|Working|Failed|Spare) Devices\s+:\s+(\d+)$" |
1143 | + rebuild_status_re = r"^\s*Rebuild Status\s+:\s+(\d+%\s+\S+)$" |
1144 | + removed_re = r"^\s*-\s+0\s+0\s+(\d+)\s+removed$" |
1145 | # 4 8 162 3 spare rebuilding /dev/sdk2 |
1146 | - rebuilding_re = r'^\s*\d+\s+\d+\s+\d+\s+\d+\s+\S+\s+rebuilding\s+(\S+)$' |
1147 | + rebuilding_re = r"^\s*\d+\s+\d+\s+\d+\s+\d+\s+\S+\s+rebuilding\s+(\S+)$" |
1148 | |
1149 | devices_cre = re.compile(devices_re) |
1150 | state_cre = re.compile(state_re) |
1151 | @@ -80,54 +78,50 @@ def get_devices_stats(devices): |
1152 | |
1153 | device = None |
1154 | devices_stats = {} |
1155 | - for line in devices_details_raw.decode().split('\n'): |
1156 | + for line in devices_details_raw.decode().split("\n"): |
1157 | line = line.rstrip() |
1158 | m = devices_cre.match(line) |
1159 | if m: |
1160 | device = m.group(1) |
1161 | devices_stats[device] = { |
1162 | - 'stats': { |
1163 | - 'Active': 0, |
1164 | - 'Working': 0, |
1165 | - 'Failed': 0, |
1166 | - 'Spare': 0, |
1167 | - }, |
1168 | - 'rebuild_status': '', |
1169 | - 'degraded': False, |
1170 | - 'recovering': False, |
1171 | - 'removed': [], |
1172 | - 'rebuilding': [], |
1173 | + "stats": {"Active": 0, "Working": 0, "Failed": 0, "Spare": 0}, |
1174 | + "rebuild_status": "", |
1175 | + "degraded": False, |
1176 | + "recovering": False, |
1177 | + "removed": [], |
1178 | + "rebuilding": [], |
1179 | } |
1180 | continue |
1181 | |
1182 | m = state_cre.match(line) |
1183 | if m: |
1184 | - # format for State line can be "clean" or "clean, degraded" or "active, degraded, rebuilding", etc. |
1185 | + # format for State line can be "clean" or "clean, degraded", |
1186 | + # or "active, degraded, rebuilding", etc. |
1187 | states = m.group(1).split(", ") |
1188 | - if 'degraded' in states and device: |
1189 | - devices_stats[device]['degraded'] = True |
1190 | - if 'recovering' in states and device: |
1191 | - devices_stats[device]['recovering'] = True |
1192 | + if "degraded" in states and device: |
1193 | + devices_stats[device]["degraded"] = True |
1194 | + if "recovering" in states and device: |
1195 | + devices_stats[device]["recovering"] = True |
1196 | continue |
1197 | |
1198 | m = status_cre.match(line) |
1199 | if m and device: |
1200 | - devices_stats[device]['stats'][m.group(1)] = int(m.group(2)) |
1201 | + devices_stats[device]["stats"][m.group(1)] = int(m.group(2)) |
1202 | continue |
1203 | |
1204 | m = removed_cre.match(line) |
1205 | if m and device: |
1206 | - devices_stats[device]['removed'].append(m.group(1)) |
1207 | + devices_stats[device]["removed"].append(m.group(1)) |
1208 | continue |
1209 | |
1210 | m = rebuild_status_cre.match(line) |
1211 | if m and device: |
1212 | - devices_stats[device]['rebuild_status'] = m.group(1) |
1213 | + devices_stats[device]["rebuild_status"] = m.group(1) |
1214 | continue |
1215 | |
1216 | m = rebuilding_cre.match(line) |
1217 | if m and device: |
1218 | - devices_stats[device]['rebuilding'].append(m.group(1)) |
1219 | + devices_stats[device]["rebuilding"].append(m.group(1)) |
1220 | continue |
1221 | |
1222 | return devices_stats |
1223 | @@ -136,14 +130,12 @@ def get_devices_stats(devices): |
1224 | def parse_output(): # noqa:C901 |
1225 | devices = get_devices() |
1226 | if len(devices) == 0: |
1227 | - return generate_output('WARNING: unexpectedly checked no devices') |
1228 | + return generate_output("WARNING: unexpectedly checked no devices") |
1229 | |
1230 | try: |
1231 | devices_stats = get_devices_stats(devices) |
1232 | except subprocess.CalledProcessError as error: |
1233 | - return generate_output( |
1234 | - "WARNING: error executing mdadm: {}".format(error) |
1235 | - ) |
1236 | + return generate_output("WARNING: error executing mdadm: {}".format(error)) |
1237 | |
1238 | msg = [] |
1239 | critical = False |
1240 | @@ -151,51 +143,50 @@ def parse_output(): # noqa:C901 |
1241 | for device in devices_stats: |
1242 | parts = [] |
1243 | # Is device degraded? |
1244 | - if devices_stats[device]['degraded'] and devices_stats[device]['recovering']: |
1245 | + if devices_stats[device]["degraded"] and devices_stats[device]["recovering"]: |
1246 | warning = True |
1247 | - parts = ['{} recovering'.format(device)] |
1248 | - elif devices_stats[device]['degraded']: |
1249 | + parts = ["{} recovering".format(device)] |
1250 | + elif devices_stats[device]["degraded"]: |
1251 | critical = True |
1252 | - parts = ['{} degraded'.format(device)] |
1253 | + parts = ["{} degraded".format(device)] |
1254 | else: |
1255 | - parts = ['{} ok'.format(device)] |
1256 | + parts = ["{} ok".format(device)] |
1257 | |
1258 | # If Failed drives are found, list counters (how many?) |
1259 | - failed_cnt = devices_stats[device]['stats'].get('Failed', 0) |
1260 | + failed_cnt = devices_stats[device]["stats"].get("Failed", 0) |
1261 | if failed_cnt > 0: |
1262 | critical = True |
1263 | dev_stats = [ |
1264 | - '{}[{}]'.format(status, devices_stats[device]['stats'][status]) |
1265 | - for status in sorted(devices_stats[device]['stats']) |
1266 | + "{}[{}]".format(status, devices_stats[device]["stats"][status]) |
1267 | + for status in sorted(devices_stats[device]["stats"]) |
1268 | ] |
1269 | parts.extend(dev_stats) |
1270 | |
1271 | - if len(devices_stats[device]['removed']) != 0: |
1272 | + if len(devices_stats[device]["removed"]) != 0: |
1273 | critical = True |
1274 | - members = " and ".join(devices_stats[device]['removed']) |
1275 | - parts.append('RaidDevice(s) {} marked removed'.format(members)) |
1276 | - |
1277 | - if len(devices_stats[device]['rebuilding']) != 0: |
1278 | - rebuilding_members = " ".join(devices_stats[device]['rebuilding']) |
1279 | - rebuild_status = devices_stats[device]['rebuild_status'] |
1280 | - parts.append('{} rebuilding ({})'.format(rebuilding_members, rebuild_status)) |
1281 | + members = " and ".join(devices_stats[device]["removed"]) |
1282 | + parts.append("RaidDevice(s) {} marked removed".format(members)) |
1283 | + |
1284 | + if len(devices_stats[device]["rebuilding"]) != 0: |
1285 | + rebuilding_members = " ".join(devices_stats[device]["rebuilding"]) |
1286 | + rebuild_status = devices_stats[device]["rebuild_status"] |
1287 | + parts.append( |
1288 | + "{} rebuilding ({})".format(rebuilding_members, rebuild_status) |
1289 | + ) |
1290 | |
1291 | - msg.append(', '.join(parts)) |
1292 | + msg.append(", ".join(parts)) |
1293 | |
1294 | if critical: |
1295 | - msg = 'CRITICAL: {}'.format('; '.join(msg)) |
1296 | + msg = "CRITICAL: {}".format("; ".join(msg)) |
1297 | elif warning: |
1298 | - msg = 'WARNING: {}'.format('; '.join(msg)) |
1299 | + msg = "WARNING: {}".format("; ".join(msg)) |
1300 | else: |
1301 | - msg = 'OK: {}'.format('; '.join(msg)) |
1302 | + msg = "OK: {}".format("; ".join(msg)) |
1303 | return generate_output(msg) |
1304 | |
1305 | |
1306 | def parse_args(argv=None): |
1307 | - parser = HWCronArgumentParser( |
1308 | - prog='cron_mdadm', |
1309 | - def_write_file=OUTPUT_FILE, |
1310 | - ) |
1311 | + parser = HWCronArgumentParser(prog="cron_mdadm", def_write_file=OUTPUT_FILE) |
1312 | return parser.parse_args(args=argv, namespace=ARGS) |
1313 | |
1314 | |
1315 | diff --git a/src/files/megacli/check_megacli.py b/src/files/megacli/check_megacli.py |
1316 | index b587d10..03d3a5d 100755 |
1317 | --- a/src/files/megacli/check_megacli.py |
1318 | +++ b/src/files/megacli/check_megacli.py |
1319 | @@ -13,55 +13,54 @@ try: |
1320 | except ImportError: |
1321 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
1322 | common_libs_dir = os.path.abspath( |
1323 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
1324 | + os.path.join(os.path.dirname(__file__), "..", "common") |
1325 | ) |
1326 | if common_libs_dir not in sys.path: |
1327 | sys.path.append(common_libs_dir) |
1328 | from hw_health_lib import HWCheckArgumentParser |
1329 | |
1330 | -INPUT_FILE = '/var/lib/nagios/megacli.out' |
1331 | +INPUT_FILE = "/var/lib/nagios/megacli.out" |
1332 | ARGS = argparse.Namespace() |
1333 | |
1334 | |
1335 | -def handle_results( |
1336 | - nlines, match, critical, errors, num_ldrive, num_pdrive, policy |
1337 | -): |
1338 | +def handle_results(nlines, match, critical, errors, num_ldrive, num_pdrive, policy): |
1339 | if nlines == 0: |
1340 | - raise WarnError('WARNING: controller not found') |
1341 | + raise WarnError("WARNING: controller not found") |
1342 | elif not match: |
1343 | - raise WarnError('WARNING: error parsing megacli output') |
1344 | + raise WarnError("WARNING: error parsing megacli output") |
1345 | elif critical: |
1346 | if len(errors) > 0: |
1347 | - msg = ', '.join([ |
1348 | - '{}({})'.format(cnt, vars()[cnt]) |
1349 | - for cnt in ('failed_ld', 'wrg_policy_ld') |
1350 | - if vars().get(cnt, 0) > 0 |
1351 | - ]) |
1352 | - msg += '; '.join(errors) |
1353 | + msg = ", ".join( |
1354 | + [ |
1355 | + "{}({})".format(cnt, vars()[cnt]) |
1356 | + for cnt in ("failed_ld", "wrg_policy_ld") |
1357 | + if vars().get(cnt, 0) > 0 |
1358 | + ] |
1359 | + ) |
1360 | + msg += "; ".join(errors) |
1361 | else: |
1362 | - msg = 'failure caught but no output available' |
1363 | - raise CriticalError('CRITICAL: {}'.format(msg)) |
1364 | + msg = "failure caught but no output available" |
1365 | + raise CriticalError("CRITICAL: {}".format(msg)) |
1366 | elif len(errors) > 0: |
1367 | - raise WarnError('WARNING: {}'.format('; '.join(errors))) |
1368 | + raise WarnError("WARNING: {}".format("; ".join(errors))) |
1369 | |
1370 | else: |
1371 | if num_ldrive == 0: |
1372 | - msg = 'OK: no disks configured for RAID' |
1373 | + msg = "OK: no disks configured for RAID" |
1374 | else: |
1375 | - msg = ('OK: Optimal, ldrives[{}], pdrives[{}]' |
1376 | - ''.format(num_ldrive, num_pdrive)) |
1377 | + msg = "OK: Optimal, ldrives[{}], pdrives[{}]".format(num_ldrive, num_pdrive) |
1378 | if policy: |
1379 | - msg += ', policy[{}]'.format(policy) |
1380 | + msg += ", policy[{}]".format(policy) |
1381 | print(msg) |
1382 | |
1383 | |
1384 | def parse_output(policy=False): # noqa:C901 |
1385 | - noadapter_re = r'^Adapter \d+: No Virtual Drive Configured' |
1386 | - adapter_re = r'^Adapter (\d+) -- Virtual Drive Information:' |
1387 | - ldrive_re = r'^Virtual Drive\s*:\s+(\d+)' |
1388 | - state_re = r'^State\s*:\s+([^\n]+)' |
1389 | - npdrives_re = r'^Number Of Drives(?: per span)?\s*:\s+(\d+)' |
1390 | - w_policy_re = r'^Current Cache Policy\s*:\s+([^,]+)' |
1391 | + noadapter_re = r"^Adapter \d+: No Virtual Drive Configured" |
1392 | + adapter_re = r"^Adapter (\d+) -- Virtual Drive Information:" |
1393 | + ldrive_re = r"^Virtual Drive\s*:\s+(\d+)" |
1394 | + state_re = r"^State\s*:\s+([^\n]+)" |
1395 | + npdrives_re = r"^Number Of Drives(?: per span)?\s*:\s+(\d+)" |
1396 | + w_policy_re = r"^Current Cache Policy\s*:\s+([^,]+)" |
1397 | |
1398 | noadapter_cre = re.compile(noadapter_re) |
1399 | adapter_cre = re.compile(adapter_re) |
1400 | @@ -78,7 +77,7 @@ def parse_output(policy=False): # noqa:C901 |
1401 | |
1402 | with open(ARGS.input_file) as devices_raw: |
1403 | for line in devices_raw.readlines(): |
1404 | - if len(line.strip()) and not line.startswith('Exit Code'): |
1405 | + if len(line.strip()) and not line.startswith("Exit Code"): |
1406 | nlines += 1 |
1407 | |
1408 | if noadapter_cre.match(line): |
1409 | @@ -101,10 +100,11 @@ def parse_output(policy=False): # noqa:C901 |
1410 | if m: |
1411 | num_ldrive += 1 |
1412 | state = m.group(1) |
1413 | - if state != 'Optimal': |
1414 | + if state != "Optimal": |
1415 | failed_ld += 1 |
1416 | - msg = 'adapter({}):ld({}):state({})'.format( |
1417 | - adapter_id, ldrive_id, state) |
1418 | + msg = "adapter({}):ld({}):state({})".format( |
1419 | + adapter_id, ldrive_id, state |
1420 | + ) |
1421 | errors.append(msg) |
1422 | critical = True |
1423 | continue |
1424 | @@ -120,22 +120,18 @@ def parse_output(policy=False): # noqa:C901 |
1425 | w_policy = m.group(1) |
1426 | if w_policy != policy: |
1427 | wrg_policy_ld += 1 |
1428 | - msg = 'adp({}):ld({}):policy({})'.format( |
1429 | - adapter_id, ldrive_id, w_policy) |
1430 | + msg = "adp({}):ld({}):policy({})".format( |
1431 | + adapter_id, ldrive_id, w_policy |
1432 | + ) |
1433 | errors.append(msg) |
1434 | critical = True |
1435 | continue |
1436 | |
1437 | - handle_results( |
1438 | - nlines, match, critical, errors, num_ldrive, num_pdrive, policy |
1439 | - ) |
1440 | + handle_results(nlines, match, critical, errors, num_ldrive, num_pdrive, policy) |
1441 | |
1442 | |
1443 | def parse_args(argv=None): |
1444 | - parser = HWCheckArgumentParser( |
1445 | - prog='check_megacli', |
1446 | - def_input_file=INPUT_FILE, |
1447 | - ) |
1448 | + parser = HWCheckArgumentParser(prog="check_megacli", def_input_file=INPUT_FILE) |
1449 | return parser.parse_args(args=argv, namespace=ARGS) |
1450 | |
1451 | |
1452 | @@ -144,5 +140,5 @@ def main(argv, policy=False): |
1453 | try_check(parse_output, policy) |
1454 | |
1455 | |
1456 | -if __name__ == '__main__': |
1457 | +if __name__ == "__main__": |
1458 | main(sys.argv[1:]) |
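
The check_megacli changes above are formatting-only; the underlying approach is to line-scan MegaCLI output with per-field regexes and flag any virtual drive whose State is not Optimal. A minimal standalone sketch of that idea, using the same ldrive/state patterns but a hand-written sample fragment and simplified bookkeeping (both are assumptions for illustration, not charm code):

import re

# Illustrative fragment shaped like the MegaCLI output check_megacli expects.
OUTPUT = """\
Adapter 0 -- Virtual Drive Information:
Virtual Drive: 0 (Target Id: 0)
State               : Optimal
Number Of Drives    : 2
Virtual Drive: 1 (Target Id: 1)
State               : Degraded
Number Of Drives    : 2
Exit Code: 0x00
"""

ldrive_re = re.compile(r"^Virtual Drive\s*:\s+(\d+)")
state_re = re.compile(r"^State\s*:\s+([^\n]+)")

errors, num_ldrive, ldrive_id = [], 0, None
for line in OUTPUT.splitlines():
    if not line.strip() or line.startswith("Exit Code"):
        continue
    m = ldrive_re.match(line)
    if m:
        ldrive_id = m.group(1)
        num_ldrive += 1
        continue
    m = state_re.match(line)
    if m and m.group(1) != "Optimal":
        # any non-Optimal state would push the real check to CRITICAL
        errors.append("ld({}):state({})".format(ldrive_id, m.group(1)))

print(num_ldrive, errors)   # 2 ['ld(1):state(Degraded)']
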
1459 | diff --git a/src/files/nvme/check_nvme.py b/src/files/nvme/check_nvme.py |
1460 | index d4117c9..701bc9c 100755 |
1461 | --- a/src/files/nvme/check_nvme.py |
1462 | +++ b/src/files/nvme/check_nvme.py |
1463 | @@ -8,55 +8,64 @@ import re |
1464 | import subprocess |
1465 | from nagios_plugin3 import CriticalError, try_check, UnknownError |
1466 | |
1467 | -NVME_RE = re.compile(r'^/dev/nvme\d+$') |
1468 | +NVME_RE = re.compile(r"^/dev/nvme\d+$") |
1469 | |
1470 | |
1471 | def parse_output(): |
1472 | keymap = {} |
1473 | critical = False |
1474 | alloutputs = [] |
1475 | - for device in glob.glob('/dev/nvme*'): |
1476 | + for device in glob.glob("/dev/nvme*"): |
1477 | if not NVME_RE.match(device): |
1478 | continue |
1479 | try: |
1480 | - output = subprocess.check_output(['sudo', '/usr/sbin/nvme', |
1481 | - 'smart-log', device]) |
1482 | + output = subprocess.check_output( |
1483 | + ["sudo", "/usr/sbin/nvme", "smart-log", device] |
1484 | + ) |
1485 | except subprocess.CalledProcessError as error: |
1486 | - print('nvme check error: {}'.format(error)) |
1487 | + print("nvme check error: {}".format(error)) |
1488 | return |
1489 | |
1490 | - for line in output.decode(errors='ignore').splitlines(): |
1491 | - datavalues_re = re.match(r'^(\w+)\s+:\s+([\d.]+)', line.strip()) |
1492 | + for line in output.decode(errors="ignore").splitlines(): |
1493 | + datavalues_re = re.match(r"^(\w+)\s+:\s+([\d.]+)", line.strip()) |
1494 | if not datavalues_re: |
1495 | continue |
1496 | key, value = datavalues_re.groups() |
1497 | - keymap[key] = value.replace('.', '') |
1498 | + keymap[key] = value.replace(".", "") |
1499 | |
1500 | - if int(keymap['critical_warning']) != 0: |
1501 | - status = ('CRITICAL: {} critical_warning is {}' |
1502 | - '').format(device, keymap['critical_warning']) |
1503 | + if int(keymap["critical_warning"]) != 0: |
1504 | + status = ("CRITICAL: {} critical_warning is {}").format( |
1505 | + device, keymap["critical_warning"] |
1506 | + ) |
1507 | critical = True |
1508 | else: |
1509 | - status = 'OK: no errors on {}'.format(device) |
1510 | + status = "OK: no errors on {}".format(device) |
1511 | |
1512 | - alloutputs.append('{} | {}'.format( |
1513 | - status, ' '.join(['{}={}'.format(repr(key), value) |
1514 | - for key, value in keymap.items()]))) |
1515 | + alloutputs.append( |
1516 | + "{} | {}".format( |
1517 | + status, |
1518 | + " ".join( |
1519 | + ["{}={}".format(repr(key), value) for key, value in keymap.items()] |
1520 | + ), |
1521 | + ) |
1522 | + ) |
1523 | |
1524 | if critical: |
1525 | - raise CriticalError('\n'.join(alloutputs)) |
1526 | + raise CriticalError("\n".join(alloutputs)) |
1527 | |
1528 | if not alloutputs: |
1529 | - raise UnknownError('no nvme devices found') |
1530 | + raise UnknownError("no nvme devices found") |
1531 | |
1532 | - print('\n'.join(alloutputs)) |
1533 | + print("\n".join(alloutputs)) |
1534 | |
1535 | |
1536 | def parse_args(argv=None): |
1537 | parser = argparse.ArgumentParser( |
1538 | - prog='check_nvme', |
1539 | - description=('this program reads the nvme smart-log and outputs an ' |
1540 | - 'appropriate Nagios status line'), |
1541 | + prog="check_nvme", |
1542 | + description=( |
1543 | + "this program reads the nvme smart-log and outputs an " |
1544 | + "appropriate Nagios status line" |
1545 | + ), |
1546 | formatter_class=argparse.ArgumentDefaultsHelpFormatter, |
1547 | ) |
1548 | return parser.parse_args(argv) |
1549 | @@ -67,5 +76,5 @@ def main(argv): |
1550 | try_check(parse_output) |
1551 | |
1552 | |
1553 | -if __name__ == '__main__': |
1554 | +if __name__ == "__main__": |
1555 | main(sys.argv[1:]) |
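
check_nvme.py keeps the same behaviour after reformatting: it runs `nvme smart-log` per device, turns each `key : value` line into a dict, and alerts when critical_warning is non-zero. A self-contained sketch of that parsing step, with a hypothetical smart-log excerpt standing in for the real command output:

import re

# Hypothetical excerpt of `nvme smart-log /dev/nvme0` output, for illustration only.
SAMPLE = """\
critical_warning                    : 0
temperature                         : 36 C
available_spare                     : 100%
media_errors                        : 0
"""

keymap = {}
for line in SAMPLE.splitlines():
    # same pattern check_nvme.py uses: "<key> : <numeric value>"
    m = re.match(r"^(\w+)\s+:\s+([\d.]+)", line.strip())
    if m:
        key, value = m.groups()
        keymap[key] = value.replace(".", "")

# a non-zero critical_warning flips the check to CRITICAL
print("CRITICAL" if int(keymap["critical_warning"]) != 0 else "OK", keymap)
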
1556 | diff --git a/src/files/sas2ircu/check_sas2ircu.py b/src/files/sas2ircu/check_sas2ircu.py |
1557 | index b38e131..9628893 100755 |
1558 | --- a/src/files/sas2ircu/check_sas2ircu.py |
1559 | +++ b/src/files/sas2ircu/check_sas2ircu.py |
1560 | @@ -13,20 +13,20 @@ try: |
1561 | except ImportError: |
1562 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
1563 | common_libs_dir = os.path.abspath( |
1564 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
1565 | + os.path.join(os.path.dirname(__file__), "..", "common") |
1566 | ) |
1567 | if common_libs_dir not in sys.path: |
1568 | sys.path.append(common_libs_dir) |
1569 | from hw_health_lib import HWCheckArgumentParser |
1570 | |
1571 | -INPUT_FILE = '/var/lib/nagios/sas2ircu.out' |
1572 | +INPUT_FILE = "/var/lib/nagios/sas2ircu.out" |
1573 | ARGS = argparse.Namespace() |
1574 | |
1575 | |
1576 | def parse_output(): |
1577 | - enclosure_re = r'^\s+Enclosure #\s+:\s+(\d+)' |
1578 | - slot_re = r'^\s+Slot #\s+:\s+(\d+)' |
1579 | - state_re = r'^\s+State\s+:\s+(\S+)' |
1580 | + enclosure_re = r"^\s+Enclosure #\s+:\s+(\d+)" |
1581 | + slot_re = r"^\s+Slot #\s+:\s+(\d+)" |
1582 | + state_re = r"^\s+State\s+:\s+(\S+)" |
1583 | |
1584 | encl_slot_state_cre = [ |
1585 | re.compile(enclosure_re), |
1586 | @@ -47,28 +47,25 @@ def parse_output(): |
1587 | |
1588 | if len(device) == 3: |
1589 | tmpdev = devices.get(device[2], []) |
1590 | - tmpdev.append('{}:{}'.format(device[0], device[1])) |
1591 | + tmpdev.append("{}:{}".format(device[0], device[1])) |
1592 | devices[device[2]] = tmpdev |
1593 | - if not ('Ready' in device or 'Optimal' in device): |
1594 | + if not ("Ready" in device or "Optimal" in device): |
1595 | critical = True |
1596 | device = [] |
1597 | |
1598 | - msg = '; '.join([ |
1599 | - '{}[{}]'.format(state, ','.join(devices[state])) for state in devices |
1600 | - ]) |
1601 | - if msg == '': |
1602 | - raise WarnError('WARNING: no output') |
1603 | + msg = "; ".join( |
1604 | + ["{}[{}]".format(state, ",".join(devices[state])) for state in devices] |
1605 | + ) |
1606 | + if msg == "": |
1607 | + raise WarnError("WARNING: no output") |
1608 | elif critical: |
1609 | - raise CriticalError('CRITICAL: {}'.format(msg)) |
1610 | + raise CriticalError("CRITICAL: {}".format(msg)) |
1611 | else: |
1612 | - print('OK: {}'.format(msg)) |
1613 | + print("OK: {}".format(msg)) |
1614 | |
1615 | |
1616 | def parse_args(argv=None): |
1617 | - parser = HWCheckArgumentParser( |
1618 | - prog='check_sas2ircu', |
1619 | - def_input_file=INPUT_FILE, |
1620 | - ) |
1621 | + parser = HWCheckArgumentParser(prog="check_sas2ircu", def_input_file=INPUT_FILE) |
1622 | return parser.parse_args(args=argv, namespace=ARGS) |
1623 | |
1624 | |
1625 | @@ -77,5 +74,5 @@ def main(argv): |
1626 | try_check(parse_output) |
1627 | |
1628 | |
1629 | -if __name__ == '__main__': |
1630 | +if __name__ == "__main__": |
1631 | main(sys.argv[1:]) |
1632 | diff --git a/src/files/sas3ircu/check_sas3ircu.py b/src/files/sas3ircu/check_sas3ircu.py |
1633 | index d62a90f..8b62679 100755 |
1634 | --- a/src/files/sas3ircu/check_sas3ircu.py |
1635 | +++ b/src/files/sas3ircu/check_sas3ircu.py |
1636 | @@ -14,182 +14,174 @@ try: |
1637 | except ImportError: |
1638 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
1639 | common_libs_dir = os.path.abspath( |
1640 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
1641 | + os.path.join(os.path.dirname(__file__), "..", "common") |
1642 | ) |
1643 | if common_libs_dir not in sys.path: |
1644 | sys.path.append(common_libs_dir) |
1645 | from hw_health_lib import HWCheckArgumentParser |
1646 | |
1647 | -INPUT_FILE = '/var/lib/nagios/sas3ircu.out' |
1648 | +INPUT_FILE = "/var/lib/nagios/sas3ircu.out" |
1649 | ARGS = argparse.Namespace() |
1650 | |
1651 | |
1652 | def parse_output(input_file): |
1653 | - ''' |
1654 | + """ |
1655 | Turn the whole sas3ircu output into a dictionary |
1656 | - ''' |
1657 | + """ |
1658 | sections_re = re.compile( |
1659 | - r'(?<=^Controller information\n)' |
1660 | - r'-+\n' |
1661 | - r'(?P<ctrl>(?:.|\n)*)' |
1662 | - r'^-+\n' |
1663 | - r'^IR Volume information\n' |
1664 | - r'-+\n' |
1665 | - r'(?P<vols>(?:.|\n)*)' |
1666 | - r'^-+\n' |
1667 | - r'^Physical device information\n' |
1668 | - r'-+\n' |
1669 | - r'(?P<disks>(?:.|\n)*)' |
1670 | - r'^-+\n' |
1671 | - r'^Enclosure information\n' |
1672 | - r'-+\n' |
1673 | - r'(?P<encl>(?:.|\n)*)' |
1674 | - r'^-+\n', |
1675 | - re.MULTILINE |
1676 | + r"(?<=^Controller information\n)" |
1677 | + r"-+\n" |
1678 | + r"(?P<ctrl>(?:.|\n)*)" |
1679 | + r"^-+\n" |
1680 | + r"^IR Volume information\n" |
1681 | + r"-+\n" |
1682 | + r"(?P<vols>(?:.|\n)*)" |
1683 | + r"^-+\n" |
1684 | + r"^Physical device information\n" |
1685 | + r"-+\n" |
1686 | + r"(?P<disks>(?:.|\n)*)" |
1687 | + r"^-+\n" |
1688 | + r"^Enclosure information\n" |
1689 | + r"-+\n" |
1690 | + r"(?P<encl>(?:.|\n)*)" |
1691 | + r"^-+\n", |
1692 | + re.MULTILINE, |
1693 | ) |
1694 | disks_re = re.compile( |
1695 | - r'(?<=^Device is a Hard disk\n)(?P<kv_data>(?:.|\n)*?)(?=^$)', |
1696 | - re.MULTILINE |
1697 | + r"(?<=^Device is a Hard disk\n)(?P<kv_data>(?:.|\n)*?)(?=^$)", re.MULTILINE |
1698 | ) |
1699 | |
1700 | with open(input_file) as devices_raw: |
1701 | sections = sections_re.search(devices_raw.read()).groupdict() |
1702 | - controller = _kv_parse(sections['ctrl']) |
1703 | - volumes = _vols_parse(sections['vols']) |
1704 | + controller = _kv_parse(sections["ctrl"]) |
1705 | + volumes = _vols_parse(sections["vols"]) |
1706 | # This collects disk level information in a structure simulating the |
1707 | # physical encl/slot arrangement |
1708 | topology = defaultdict(dict) |
1709 | - for match in disks_re.findall(sections['disks']): |
1710 | + for match in disks_re.findall(sections["disks"]): |
1711 | disk = _kv_parse(match) |
1712 | - encl = disk['Enclosure #'] |
1713 | - slot = disk['Slot #'] |
1714 | + encl = disk["Enclosure #"] |
1715 | + slot = disk["Slot #"] |
1716 | topology[encl][slot] = disk |
1717 | - enclosure = _kv_parse(sections['encl']) |
1718 | + enclosure = _kv_parse(sections["encl"]) |
1719 | |
1720 | return { |
1721 | - 'controller': controller, |
1722 | - 'volumes': volumes, |
1723 | - 'disks': topology, |
1724 | - 'enclosure': enclosure, |
1725 | + "controller": controller, |
1726 | + "volumes": volumes, |
1727 | + "disks": topology, |
1728 | + "enclosure": enclosure, |
1729 | } |
1730 | |
1731 | |
1732 | def _vols_parse(text): |
1733 | vols_re = re.compile( |
1734 | - r'^IR volume (?P<n>\d+)\n' |
1735 | - r'(?P<kv_data>(?:.|\n)*?)' |
1736 | - r'\s+Physical hard disks\s+:.*\n' |
1737 | - r'(?P<topology>(?:^\s+PHY.*\n)+)', |
1738 | - re.MULTILINE |
1739 | + r"^IR volume (?P<n>\d+)\n" |
1740 | + r"(?P<kv_data>(?:.|\n)*?)" |
1741 | + r"\s+Physical hard disks\s+:.*\n" |
1742 | + r"(?P<topology>(?:^\s+PHY.*\n)+)", |
1743 | + re.MULTILINE, |
1744 | ) |
1745 | vol_topology_re = re.compile( |
1746 | - r'\s+PHY\[(?P<n>\d+)\]\s+Enclosure#\/Slot#\s+' |
1747 | - r':\s+(?P<enc>\d+):(?P<slot>\d+)' |
1748 | + r"\s+PHY\[(?P<n>\d+)\]\s+Enclosure#\/Slot#\s+" r":\s+(?P<enc>\d+):(?P<slot>\d+)" |
1749 | ) |
1750 | volumes = {} |
1751 | for (vol_n, kv_data, vol_topology) in vols_re.findall(text): |
1752 | topology = {} |
1753 | for (member_n, enc, slot) in vol_topology_re.findall(vol_topology): |
1754 | - topology[member_n] = {'enc': enc, 'slot': slot} |
1755 | - volumes[vol_n] = {**_kv_parse(kv_data), 'topology': topology} |
1756 | + topology[member_n] = {"enc": enc, "slot": slot} |
1757 | + volumes[vol_n] = {**_kv_parse(kv_data), "topology": topology} |
1758 | |
1759 | return volumes |
1760 | |
1761 | |
1762 | def _kv_parse(text): |
1763 | - ''' |
1764 | + """ |
1765 | Build a dict by parsing text like: |
1766 | |
1767 | key1 : value1 |
1768 | key2 : value2 |
1769 | - ''' |
1770 | - key_value_re = re.compile( |
1771 | - r'^\s*(?P<key>.*?)\s+:\s+(?P<value>.*)' |
1772 | - ) |
1773 | + """ |
1774 | + key_value_re = re.compile(r"^\s*(?P<key>.*?)\s+:\s+(?P<value>.*)") |
1775 | text = text.strip() |
1776 | return { |
1777 | - m.group('key'): m.group('value') |
1778 | - for m in map(key_value_re.search, text.split('\n')) |
1779 | + m.group("key"): m.group("value") |
1780 | + for m in map(key_value_re.search, text.split("\n")) |
1781 | } |
1782 | |
1783 | |
1784 | def eval_status(data): |
1785 | - ''' |
1786 | + """ |
1787 | Given a dictionary and a set of rules, determine the state of the storage |
1788 | subsystem |
1789 | - ''' |
1790 | - OK = 'Okay (OKY)' |
1791 | - READY = 'Ready (RDY)' |
1792 | - OPTIMAL = 'Optimal (OPT)' |
1793 | + """ |
1794 | + OK = "Okay (OKY)" |
1795 | + READY = "Ready (RDY)" |
1796 | + OPTIMAL = "Optimal (OPT)" |
1797 | status = Status() |
1798 | |
1799 | # 1. Volumes must be in Okay state |
1800 | - for volume in data['volumes'].values(): |
1801 | - vol_id = volume['Volume ID'] |
1802 | - vol_status = volume['Status of volume'] |
1803 | + for volume in data["volumes"].values(): |
1804 | + vol_id = volume["Volume ID"] |
1805 | + vol_status = volume["Status of volume"] |
1806 | if vol_status != OK: |
1807 | status.crit("Volume {}: {}".format(vol_id, vol_status)) |
1808 | else: |
1809 | # 2. Volume members must be in Optimal state |
1810 | - for member in volume['topology'].values(): |
1811 | - disk = data['disks'][member['enc']][member['slot']] |
1812 | - if disk['State'] != OPTIMAL: |
1813 | + for member in volume["topology"].values(): |
1814 | + disk = data["disks"][member["enc"]][member["slot"]] |
1815 | + if disk["State"] != OPTIMAL: |
1816 | msg = "Disk {}:{} {}".format( |
1817 | - member['enc'], |
1818 | - member['slot'], |
1819 | - disk['State'] |
1820 | + member["enc"], member["slot"], disk["State"] |
1821 | ) |
1822 | - if disk['State'] == READY: |
1823 | + if disk["State"] == READY: |
1824 | status.warn(msg) |
1825 | else: |
1826 | status.crit(msg) |
1827 | # 3. Disks can be in Optimal or Ready state ("ready" is ok for non-RAID |
1828 | # members) |
1829 | - for enclosure_id, enclosure in data['disks'].items(): |
1830 | + for enclosure_id, enclosure in data["disks"].items(): |
1831 | for slot_id, slot in enclosure.items(): |
1832 | - if slot['State'] not in [OPTIMAL, READY]: |
1833 | - status.crit("Disk {}:{} {}".format( |
1834 | - enclosure_id, |
1835 | - slot_id, |
1836 | - slot['State'] |
1837 | - )) |
1838 | + if slot["State"] not in [OPTIMAL, READY]: |
1839 | + status.crit( |
1840 | + "Disk {}:{} {}".format(enclosure_id, slot_id, slot["State"]) |
1841 | + ) |
1842 | status.get_status() |
1843 | |
1844 | |
1845 | class Status: |
1846 | - ''' |
1847 | + """ |
1848 | Class hiding the whole "CRIT >> WARN >> OK" priority scheme |
1849 | - ''' |
1850 | - def __init__(self, status='OK'): |
1851 | + """ |
1852 | + |
1853 | + def __init__(self, status="OK"): |
1854 | self._status = status |
1855 | self._msgs = set() |
1856 | |
1857 | def crit(self, msg): |
1858 | - self._status = 'CRITICAL' |
1859 | + self._status = "CRITICAL" |
1860 | self._msgs.add(msg) |
1861 | |
1862 | def warn(self, msg): |
1863 | - if self._status != 'CRITICAL': |
1864 | - self._status = 'WARNING' |
1865 | + if self._status != "CRITICAL": |
1866 | + self._status = "WARNING" |
1867 | self._msgs.add(msg) |
1868 | |
1869 | def ok(self, msg): |
1870 | self._msgs.add(msg) |
1871 | |
1872 | def get_status(self): |
1873 | - ''' |
1874 | + """ |
1875 | Render the current status, rasing nagios_plugin3 exceptions if things |
1876 | are not OK |
1877 | - ''' |
1878 | - if self._status == 'OK': |
1879 | - msg = '{}: no errors'.format(self._status) |
1880 | + """ |
1881 | + if self._status == "OK": |
1882 | + msg = "{}: no errors".format(self._status) |
1883 | print(msg) |
1884 | else: |
1885 | - msg = '{}: {}'.format(self._status, |
1886 | - ' | '.join(self._msgs)) |
1887 | - if self._status == 'CRITICAL': |
1888 | + msg = "{}: {}".format(self._status, " | ".join(self._msgs)) |
1889 | + if self._status == "CRITICAL": |
1890 | raise CriticalError(msg) |
1891 | - elif self._status == 'WARNING': |
1892 | + elif self._status == "WARNING": |
1893 | raise WarnError(msg) |
1894 | else: |
1895 | # this really shouldn't be happening |
1896 | @@ -200,10 +192,7 @@ class Status: |
1897 | |
1898 | |
1899 | def parse_args(argv=None): |
1900 | - parser = HWCheckArgumentParser( |
1901 | - prog='check_sas3ircu', |
1902 | - def_input_file=INPUT_FILE, |
1903 | - ) |
1904 | + parser = HWCheckArgumentParser(prog="check_sas3ircu", def_input_file=INPUT_FILE) |
1905 | return parser.parse_args(args=argv, namespace=ARGS) |
1906 | |
1907 | |
1908 | @@ -213,5 +202,5 @@ def main(argv=None): |
1909 | try_check(eval_status, data) |
1910 | |
1911 | |
1912 | -if __name__ == '__main__': |
1913 | +if __name__ == "__main__": |
1914 | main(sys.argv[1:]) |
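
The Status class reformatted above encodes a simple CRIT >> WARN >> OK escalation. The standalone sketch below reimplements that precedence with local stand-ins for nagios_plugin3's CriticalError/WarnError, purely to show the ordering; the messages are made up:

# Minimal sketch of the "CRIT >> WARN >> OK" scheme; not the charm's class.
class CriticalError(Exception):
    pass


class WarnError(Exception):
    pass


class Status:
    def __init__(self):
        self._status = "OK"
        self._msgs = set()

    def crit(self, msg):
        self._status = "CRITICAL"          # CRITICAL always wins
        self._msgs.add(msg)

    def warn(self, msg):
        if self._status != "CRITICAL":     # WARNING never downgrades CRITICAL
            self._status = "WARNING"
        self._msgs.add(msg)

    def get_status(self):
        if self._status == "OK":
            print("OK: no errors")
        elif self._status == "CRITICAL":
            raise CriticalError("CRITICAL: " + " | ".join(sorted(self._msgs)))
        else:
            raise WarnError("WARNING: " + " | ".join(sorted(self._msgs)))


status = Status()
status.warn("Disk 2:5 Ready (RDY)")
status.crit("Volume 286: Degraded")
try:
    status.get_status()
except CriticalError as err:
    print(err)   # the warning is still reported, but the overall state stays CRITICAL
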
1915 | diff --git a/src/files/ssacli/cron_ssacli.py b/src/files/ssacli/cron_ssacli.py |
1916 | index 94c7ef7..eadd711 100755 |
1917 | --- a/src/files/ssacli/cron_ssacli.py |
1918 | +++ b/src/files/ssacli/cron_ssacli.py |
1919 | @@ -25,7 +25,7 @@ try: |
1920 | except ImportError: |
1921 | # shared lib will be under $CHARM_SOURCE_DIR/files/common during unit tests |
1922 | common_libs_dir = os.path.abspath( |
1923 | - os.path.join(os.path.dirname(__file__), '..', 'common') |
1924 | + os.path.join(os.path.dirname(__file__), "..", "common") |
1925 | ) |
1926 | if common_libs_dir not in sys.path: |
1927 | sys.path.append(common_libs_dir) |
1928 | @@ -37,16 +37,14 @@ except ImportError: |
1929 | HPArgumentParser, |
1930 | ) |
1931 | |
1932 | -SSACLI_BIN = '/opt/smartstorageadmin/ssacli/bin/ssacli' |
1933 | -OUTPUT_FILE = '/var/lib/nagios/ssacli.out' |
1934 | -EXCLUDE_FILE = '/etc/nagios/ssacli.exclude.yaml' |
1935 | +SSACLI_BIN = "/opt/smartstorageadmin/ssacli/bin/ssacli" |
1936 | +OUTPUT_FILE = "/var/lib/nagios/ssacli.out" |
1937 | +EXCLUDE_FILE = "/etc/nagios/ssacli.exclude.yaml" |
1938 | |
1939 | |
1940 | def parse_args(argv=None): |
1941 | parser = HPArgumentParser( |
1942 | - prog='cron_ssacli', |
1943 | - def_write_file=OUTPUT_FILE, |
1944 | - def_exclude_file=EXCLUDE_FILE |
1945 | + prog="cron_ssacli", def_write_file=OUTPUT_FILE, def_exclude_file=EXCLUDE_FILE |
1946 | ) |
1947 | return parser.parse_args(args=argv) |
1948 | |
1949 | @@ -60,38 +58,46 @@ def check_array(slot): |
1950 | physicaldrive 1:1 (box 1:bay 1, 72 GB): OK |
1951 | physicaldrive 1:2 (box 1:bay 2, 72 GB): OK |
1952 | """ |
1953 | - if os.path.isfile('/etc/nagios/skip-cat-hp-array.txt'): |
1954 | + if os.path.isfile("/etc/nagios/skip-cat-hp-array.txt"): |
1955 | return |
1956 | |
1957 | cmd = ( |
1958 | - '{ssacli} ctrl slot={slot} ld all show status; ' |
1959 | - '{ssacli} ctrl slot={slot} pd all show status'.format(ssacli=SSACLI_BIN, slot=slot) |
1960 | + "{ssacli} ctrl slot={slot} ld all show status; " |
1961 | + "{ssacli} ctrl slot={slot} pd all show status".format( |
1962 | + ssacli=SSACLI_BIN, slot=slot |
1963 | + ) |
1964 | ) |
1965 | try: |
1966 | - result = subprocess.check_output(cmd, shell=True).decode('UTF-8') |
1967 | + result = subprocess.check_output(cmd, shell=True).decode("UTF-8") |
1968 | return _parse_array_output(result) |
1969 | except subprocess.CalledProcessError as e: |
1970 | return ( |
1971 | - 'UNKNOWN Call to ssacli to show ld/pd info failed. ' |
1972 | - 'Array Slot {} - Return Code {} - {}' |
1973 | - ''.format(slot, e.returncode, e.output) |
1974 | + "UNKNOWN Call to ssacli to show ld/pd info failed. " |
1975 | + "Array Slot {} - Return Code {} - {}" |
1976 | + "".format(slot, e.returncode, e.output) |
1977 | ) |
1978 | |
1979 | |
1980 | def _parse_array_output(output): |
1981 | - innocuous_errors = re.compile(r'^Error: The specified (device|controller) ' |
1982 | - 'does not have any (logical|physical)') |
1983 | - drive_status_line = re.compile(r'^\s*(logicaldrive|physicaldrive)') |
1984 | - ignore_file = '/etc/nagios/ignores/ignores-cat-hp-array.txt' |
1985 | + innocuous_errors = re.compile( |
1986 | + r"^Error: The specified (device|controller) " |
1987 | + "does not have any (logical|physical)" |
1988 | + ) |
1989 | + drive_status_line = re.compile(r"^\s*(logicaldrive|physicaldrive)") |
1990 | + ignore_file = "/etc/nagios/ignores/ignores-cat-hp-array.txt" |
1991 | ignores = read_ignore_file(ignore_file) |
1992 | |
1993 | for line in output.splitlines(): |
1994 | line = line.strip() |
1995 | - if not line or innocuous_errors.search(line) or not drive_status_line.search(line): |
1996 | + if ( |
1997 | + not line |
1998 | + or innocuous_errors.search(line) |
1999 | + or not drive_status_line.search(line) |
2000 | + ): |
2001 | continue |
2002 | (drivetype, number) = line.split()[:2] |
2003 | - status = line.split('):')[1].lstrip().upper() |
2004 | - if status != 'OK': |
2005 | + status = line.split("):")[1].lstrip().upper() |
2006 | + if status != "OK": |
2007 | err = '{} {} is "{}"'.format(drivetype, number, status) |
2008 | if not ignore(err, ignores): |
2009 | return err |
2010 | @@ -107,24 +113,24 @@ def check_controller(slot): |
2011 | Cache Status: OK |
2012 | Battery Status: Failed (Replace Batteries) |
2013 | """ |
2014 | - if os.path.isfile('/etc/nagios/skip-cat-hp-controller.txt'): |
2015 | + if os.path.isfile("/etc/nagios/skip-cat-hp-controller.txt"): |
2016 | return |
2017 | |
2018 | - cmd = '{ssacli} ctrl slot={slot} show status'.format(ssacli=SSACLI_BIN, slot=slot) |
2019 | + cmd = "{ssacli} ctrl slot={slot} show status".format(ssacli=SSACLI_BIN, slot=slot) |
2020 | try: |
2021 | - result = subprocess.check_output(cmd, shell=True).decode('UTF-8') |
2022 | + result = subprocess.check_output(cmd, shell=True).decode("UTF-8") |
2023 | return _parse_controller_output(result) |
2024 | except subprocess.CalledProcessError as e: |
2025 | return ( |
2026 | - 'UNKNOWN Call to ssacli to show ld/pd info failed. ' |
2027 | - 'Array Slot {} - Return Code {} - {}' |
2028 | - ''.format(slot, e.returncode, e.output) |
2029 | + "UNKNOWN Call to ssacli to show ld/pd info failed. " |
2030 | + "Array Slot {} - Return Code {} - {}" |
2031 | + "".format(slot, e.returncode, e.output) |
2032 | ) |
2033 | |
2034 | |
2035 | def _parse_controller_output(output): |
2036 | controller = "Unknown" |
2037 | - ignore_file = '/etc/nagios/ignores/ignores-cat-hp-controller.txt' |
2038 | + ignore_file = "/etc/nagios/ignores/ignores-cat-hp-controller.txt" |
2039 | ignores = read_ignore_file(ignore_file) |
2040 | for line in output.splitlines(): |
2041 | line = line.strip() |
2042 | @@ -151,7 +157,7 @@ def main(): |
2043 | |
2044 | slots = get_hp_controller_slots() |
2045 | if not slots: |
2046 | - msg = 'OK: no controller/array found to check' |
2047 | + msg = "OK: no controller/array found to check" |
2048 | exit = 0 |
2049 | |
2050 | errors = [] |
2051 | @@ -160,19 +166,19 @@ def main(): |
2052 | errors += check_array(slot) |
2053 | |
2054 | if len(errors) > 0: |
2055 | - msg = 'CRIT {} error(s): {}'.format(len(errors), ' - '.join(errors)) |
2056 | + msg = "CRIT {} error(s): {}".format(len(errors), " - ".join(errors)) |
2057 | exit = 2 |
2058 | else: |
2059 | - msg = 'OK No errors found' |
2060 | + msg = "OK No errors found" |
2061 | exit = 0 |
2062 | |
2063 | if ARGS.write: |
2064 | - with open(ARGS.write, 'w') as f: |
2065 | + with open(ARGS.write, "w") as f: |
2066 | f.write(msg) |
2067 | else: |
2068 | print(msg) |
2069 | sys.exit(exit) |
2070 | |
2071 | |
2072 | -if __name__ == '__main__': |
2073 | +if __name__ == "__main__": |
2074 | main() |
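
_parse_array_output above works on `ssacli ... show status` lines of the form `<type> <id> (...): <status>`. A minimal sketch of that per-line parsing, with illustrative sample lines and the ignore-file handling left out:

import re

# Illustrative lines in the "...): <status>" form the parser expects.
OUTPUT = """\
   logicaldrive 1 (68.3 GB, RAID 1): OK
   physicaldrive 1:1 (box 1:bay 1, 72 GB): OK
   physicaldrive 1:2 (box 1:bay 2, 72 GB): Failed
"""

drive_status_line = re.compile(r"^\s*(logicaldrive|physicaldrive)")
errors = []
for line in OUTPUT.splitlines():
    line = line.strip()
    if not line or not drive_status_line.search(line):
        continue
    drivetype, number = line.split()[:2]
    status = line.split("):")[1].lstrip().upper()   # text after "):" is the status
    if status != "OK":
        errors.append('{} {} is "{}"'.format(drivetype, number, status))

print(errors)   # ['physicaldrive 1:2 is "FAILED"']
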
2075 | diff --git a/src/lib/hwhealth/discovery/lshw.py b/src/lib/hwhealth/discovery/lshw.py |
2076 | index c2f6653..1e52304 100644 |
2077 | --- a/src/lib/hwhealth/discovery/lshw.py |
2078 | +++ b/src/lib/hwhealth/discovery/lshw.py |
2079 | @@ -7,48 +7,45 @@ from charmhelpers.core import hookenv |
2080 | |
2081 | |
2082 | class Hardware(object): |
2083 | - def __init__(self, filename='/var/run/hw_health_lshw.json'): |
2084 | + def __init__(self, filename="/var/run/hw_health_lshw.json"): |
2085 | self.__filename = filename |
2086 | self._lshw = self.__load_hwinfo() |
2087 | |
2088 | def __load_hwinfo(self): |
2089 | try: |
2090 | if os.path.exists(self.__filename): |
2091 | - with open(self.__filename, 'r') as fd: |
2092 | + with open(self.__filename, "r") as fd: |
2093 | hwinfo = json.load(fd) |
2094 | else: |
2095 | - output = subprocess.check_output(['lshw', '-json']) |
2096 | + output = subprocess.check_output(["lshw", "-json"]) |
2097 | # Note(aluria): py35 does not support extra args on |
2098 | # subprocess.check_output |
2099 | - output_str = output.decode(errors='ignore') |
2100 | + output_str = output.decode(errors="ignore") |
2101 | hwinfo = json.loads(output_str) |
2102 | - with open(self.__filename, 'w') as fd: |
2103 | + with open(self.__filename, "w") as fd: |
2104 | fd.write(output_str) |
2105 | |
2106 | return hwinfo |
2107 | except PermissionError as error: |
2108 | - hookenv.log('lshw io error: {}'.format(error), |
2109 | - hookenv.ERROR) |
2110 | + hookenv.log("lshw io error: {}".format(error), hookenv.ERROR) |
2111 | return {} |
2112 | except subprocess.CalledProcessError as error: |
2113 | - hookenv.log('lshw subprocess error: {}'.format(error), |
2114 | - hookenv.ERROR) |
2115 | + hookenv.log("lshw subprocess error: {}".format(error), hookenv.ERROR) |
2116 | return {} |
2117 | except json.JSONDecodeError as error: |
2118 | - hookenv.log('lshw json error: {}'.format(error), |
2119 | - hookenv.ERROR) |
2120 | + hookenv.log("lshw json error: {}".format(error), hookenv.ERROR) |
2121 | return {} |
2122 | |
2123 | @property |
2124 | def get_system(self): |
2125 | """Helper to get vendor info retrieved via actions |
2126 | """ |
2127 | - keys = 'id description vendor product version serial'.split() |
2128 | + keys = "id description vendor product version serial".split() |
2129 | sysinfo = {} |
2130 | for k in keys: |
2131 | v = self._lshw.get(k) |
2132 | - if k == 'id': |
2133 | - k = 'hostname' |
2134 | + if k == "id": |
2135 | + k = "hostname" |
2136 | sysinfo.update({k: v}) |
2137 | return sysinfo |
2138 | |
2139 | @@ -56,22 +53,20 @@ class Hardware(object): |
2140 | def get_motherboard(self): |
2141 | """Helper to get vendor info retrieved via actions |
2142 | """ |
2143 | - keys = 'description vendor product version serial'.split() |
2144 | + keys = "description vendor product version serial".split() |
2145 | buses = [] |
2146 | - for child in self._lshw.get('children', [{}]): |
2147 | - if child.get('class') != 'bus': |
2148 | + for child in self._lshw.get("children", [{}]): |
2149 | + if child.get("class") != "bus": |
2150 | continue |
2151 | buses.append(dict([(k, child.get(k)) for k in keys])) |
2152 | return buses |
2153 | |
2154 | - def _get_inspect_bridges(self, bridge_item, bridge_class='storage'): |
2155 | + def _get_inspect_bridges(self, bridge_item, bridge_class="storage"): |
2156 | bridge_class_items = [] |
2157 | - for item in bridge_item.get('children', [{}]): |
2158 | - if item.get('class', '') == 'bridge': |
2159 | - bridge_class_items.extend( |
2160 | - self._get_inspect_bridges(item, bridge_class) |
2161 | - ) |
2162 | - elif item.get('class', '') == bridge_class: |
2163 | + for item in bridge_item.get("children", [{}]): |
2164 | + if item.get("class", "") == "bridge": |
2165 | + bridge_class_items.extend(self._get_inspect_bridges(item, bridge_class)) |
2166 | + elif item.get("class", "") == bridge_class: |
2167 | bridge_class_items.append(item) |
2168 | return bridge_class_items |
2169 | |
2170 | @@ -85,18 +80,16 @@ class Hardware(object): |
2171 | """ |
2172 | storage = [] |
2173 | # system -> bus -> bridge -> storage |
2174 | - for bus in self._lshw.get('children', [{}]): |
2175 | - if bus.get('class', '') != 'bus': |
2176 | + for bus in self._lshw.get("children", [{}]): |
2177 | + if bus.get("class", "") != "bus": |
2178 | continue |
2179 | - for bridge in bus.get('children', [{}]): |
2180 | - if bridge.get('class', '') != 'bridge': |
2181 | + for bridge in bus.get("children", [{}]): |
2182 | + if bridge.get("class", "") != "bridge": |
2183 | continue |
2184 | - for item in bridge.get('children', [{}]): |
2185 | - if item.get('class', '') == 'bridge': |
2186 | - storage.extend( |
2187 | - self._get_inspect_bridges(item, 'storage') |
2188 | - ) |
2189 | - elif item.get('class', '') == 'storage': |
2190 | + for item in bridge.get("children", [{}]): |
2191 | + if item.get("class", "") == "bridge": |
2192 | + storage.extend(self._get_inspect_bridges(item, "storage")) |
2193 | + elif item.get("class", "") == "storage": |
2194 | storage.append(item) |
2195 | return storage |
2196 | |
2197 | @@ -108,15 +101,15 @@ class Hardware(object): |
2198 | businfo and linux driver The aim of the function is to easily parse |
2199 | products to detect which tool(s) need to be used. |
2200 | """ |
2201 | - keys = 'vendor product businfo'.split() |
2202 | - config_keys = ['driver'] |
2203 | + keys = "vendor product businfo".split() |
2204 | + config_keys = ["driver"] |
2205 | storage = [] |
2206 | for item in self._get_storage_class: |
2207 | storage_item = dict([(k, item.get(k)) for k in keys]) |
2208 | storage_item.update( |
2209 | - dict([(k, item.get('configuration', {}).get(k)) |
2210 | - for k in config_keys])) |
2211 | - storage_item.update({'has_children': 'children' in item}) |
2212 | + dict([(k, item.get("configuration", {}).get(k)) for k in config_keys]) |
2213 | + ) |
2214 | + storage_item.update({"has_children": "children" in item}) |
2215 | storage.append(storage_item) |
2216 | |
2217 | return storage |
2218 | @@ -133,14 +126,14 @@ class Hardware(object): |
2219 | The aim of the function is to easily parse products to detect which |
2220 | tool(s) need to be used. |
2221 | """ |
2222 | - keys = 'product serial businfo physid dev size logicalname'.split() |
2223 | + keys = "product serial businfo physid dev size logicalname".split() |
2224 | disks = [] |
2225 | for item in self._get_storage_class: |
2226 | - for child in item.get('children', [{}]): |
2227 | - if child.get('class', '') != 'disk': |
2228 | + for child in item.get("children", [{}]): |
2229 | + if child.get("class", "") != "disk": |
2230 | continue |
2231 | disk = dict([(k, child.get(k)) for k in keys]) |
2232 | - disk.update({'storage_parent': item.get('product')}) |
2233 | + disk.update({"storage_parent": item.get("product")}) |
2234 | disks.append(disk) |
2235 | return disks |
2236 | |
2237 | @@ -153,28 +146,28 @@ class Hardware(object): |
2238 | The aim of the function is to easily parse products to detect which |
2239 | tool(s) need to be used. |
2240 | """ |
2241 | - keys = 'vendor product businfo logicalname serial'.split() |
2242 | - config_keys = 'driver driverversion firmware speed'.split() |
2243 | + keys = "vendor product businfo logicalname serial".split() |
2244 | + config_keys = "driver driverversion firmware speed".split() |
2245 | nics = [] |
2246 | # system -> bus -> bridge -> network |
2247 | - for bus in self._lshw.get('children', [{}]): |
2248 | - if bus.get('class', '') != 'bus': |
2249 | + for bus in self._lshw.get("children", [{}]): |
2250 | + if bus.get("class", "") != "bus": |
2251 | continue |
2252 | - for bridge in bus.get('children', [{}]): |
2253 | - if bridge.get('class', '') != 'bridge': |
2254 | + for bridge in bus.get("children", [{}]): |
2255 | + if bridge.get("class", "") != "bridge": |
2256 | continue |
2257 | - for item in bridge.get('children', [{}]): |
2258 | - if item.get('class', '') == 'bridge': |
2259 | - nics.extend(self._get_inspect_bridges(item, 'network')) |
2260 | - elif item.get('class', '') == 'network': |
2261 | + for item in bridge.get("children", [{}]): |
2262 | + if item.get("class", "") == "bridge": |
2263 | + nics.extend(self._get_inspect_bridges(item, "network")) |
2264 | + elif item.get("class", "") == "network": |
2265 | nics.append(item) |
2266 | |
2267 | nics_filtered = [] |
2268 | for nic in nics: |
2269 | nic_item = dict([(k, nic.get(k)) for k in keys]) |
2270 | nic_item.update( |
2271 | - dict([(k, nic.get('configuration', {}).get(k)) |
2272 | - for k in config_keys])) |
2273 | + dict([(k, nic.get("configuration", {}).get(k)) for k in config_keys]) |
2274 | + ) |
2275 | nics_filtered.append(nic_item) |
2276 | return nics_filtered |
2277 | |
2278 | @@ -182,60 +175,67 @@ class Hardware(object): |
2279 | def formatted_system_info(self): |
2280 | ctxt = self.get_system |
2281 | return ( |
2282 | - '{description}: vendor[{vendor}], product_name[{product}], ' |
2283 | - 'version[{version}], serial[{serial}], hostname[{hostname}]' |
2284 | + "{description}: vendor[{vendor}], product_name[{product}], " |
2285 | + "version[{version}], serial[{serial}], hostname[{hostname}]" |
2286 | ).format(**ctxt) |
2287 | |
2288 | @property |
2289 | def formatted_motherboard_info(self): |
2290 | - return '\n'.join([ |
2291 | - '{description}: vendor[{vendor}], product_name[{product}], ' |
2292 | - 'version[{version}], serial[{serial}]'.format(**ctxt) |
2293 | - for ctxt in self.get_motherboard]) |
2294 | + return "\n".join( |
2295 | + [ |
2296 | + "{description}: vendor[{vendor}], product_name[{product}], " |
2297 | + "version[{version}], serial[{serial}]".format(**ctxt) |
2298 | + for ctxt in self.get_motherboard |
2299 | + ] |
2300 | + ) |
2301 | |
2302 | @property |
2303 | def formatted_storage_class_info(self): |
2304 | - LINE = ('driver[{driver}], businfo[{businfo}], ' |
2305 | - 'has_children[{has_children}]') |
2306 | + LINE = "driver[{driver}], businfo[{businfo}], has_children[{has_children}]" |
2307 | ctxts = [] |
2308 | for ctxt in self.get_storage_class_info: |
2309 | - if ctxt.get('vendor') and ctxt.get('product'): |
2310 | - tmpl = ('Storage class: vendor[{vendor}],' |
2311 | - 'product_name[{product}], ') + LINE |
2312 | + if ctxt.get("vendor") and ctxt.get("product"): |
2313 | + tmpl = ( |
2314 | + "Storage class: vendor[{vendor}],product_name[{product}], " |
2315 | + ) + LINE |
2316 | else: |
2317 | - tmpl = 'Storage class: {}'.format(LINE) |
2318 | + tmpl = "Storage class: {}".format(LINE) |
2319 | ctxts.append(tmpl.format(**ctxt)) |
2320 | |
2321 | if ctxts: |
2322 | - return '\n'.join(ctxts) |
2323 | + return "\n".join(ctxts) |
2324 | |
2325 | @property |
2326 | def formatted_disk_class_info(self): |
2327 | - return '\n'.join([ |
2328 | - 'Disk class: ld[{logicalname}], dev[{dev}], physid[{physid}], ' |
2329 | - 'businfo[{businfo}], product_name[{product}], serial[{serial}], ' |
2330 | - 'size[{size}], storage_parent[{storage_parent}]'.format(**ctxt) |
2331 | - for ctxt in self.get_disk_class_info |
2332 | - ]) |
2333 | + return "\n".join( |
2334 | + [ |
2335 | + "Disk class: ld[{logicalname}], dev[{dev}], physid[{physid}], " |
2336 | + "businfo[{businfo}], product_name[{product}], serial[{serial}], " |
2337 | + "size[{size}], storage_parent[{storage_parent}]".format(**ctxt) |
2338 | + for ctxt in self.get_disk_class_info |
2339 | + ] |
2340 | + ) |
2341 | |
2342 | @property |
2343 | def formatted_network_class_info(self): |
2344 | - return '\n'.join([ |
2345 | - 'NIC: iface[{logicalname}], businfo[{businfo}], vendor[{vendor}]' |
2346 | - ', product_name[{product}], firmware[{firmware}], driver[{driver}' |
2347 | - ', {driverversion}], serial[{serial}]' |
2348 | - ', speed[{speed}]'.format(**ctxt) |
2349 | - for ctxt in self.get_network_class_info |
2350 | - ]) |
2351 | - |
2352 | - |
2353 | -if __name__ == '__main__': |
2354 | + return "\n".join( |
2355 | + [ |
2356 | + "NIC: iface[{logicalname}], businfo[{businfo}], vendor[{vendor}]" |
2357 | + ", product_name[{product}], firmware[{firmware}], driver[{driver}" |
2358 | + ", {driverversion}], serial[{serial}]" |
2359 | + ", speed[{speed}]".format(**ctxt) |
2360 | + for ctxt in self.get_network_class_info |
2361 | + ] |
2362 | + ) |
2363 | + |
2364 | + |
2365 | +if __name__ == "__main__": |
2366 | hw = Hardware() |
2367 | print(hw.formatted_system_info) |
2368 | print(hw.formatted_motherboard_info) |
2369 | - print('\n== get_storage_classes') |
2370 | + print("\n== get_storage_classes") |
2371 | print(hw.formatted_storage_class_info) |
2372 | - print('== get_disk_classes') |
2373 | + print("== get_disk_classes") |
2374 | print(hw.formatted_disk_class_info) |
2375 | - print('\n== get_network_class_info') |
2376 | + print("\n== get_network_class_info") |
2377 | print(hw.formatted_network_class_info) |
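
Hardware._get_storage_class (and the network variant) walk the `lshw -json` tree as system -> bus -> bridge -> storage/network, recursing through nested bridges. The sketch below performs the same traversal over a tiny hand-written lshw-style dict; the dict contents are invented for illustration:

# Hand-written stand-in for `lshw -json` output, only to show the traversal.
LSHW = {
    "id": "host", "class": "system",
    "children": [
        {
            "class": "bus",
            "children": [
                {
                    "class": "bridge",
                    "children": [
                        {"class": "bridge", "children": [
                            {"class": "storage", "vendor": "LSI Logic / Symbios Logic",
                             "product": "SAS3008 PCI-Express Fusion-MPT SAS-3"},
                        ]},
                        {"class": "storage", "vendor": "Intel Corporation",
                         "product": "PCIe Data Center SSD"},
                    ],
                },
            ],
        },
    ],
}


def walk_bridges(item, wanted="storage"):
    found = []
    for child in item.get("children", []):
        if child.get("class") == "bridge":
            found.extend(walk_bridges(child, wanted))   # nested bridges recurse
        elif child.get("class") == wanted:
            found.append(child)
    return found


storage = []
for bus in LSHW.get("children", []):
    if bus.get("class") != "bus":
        continue
    for bridge in bus.get("children", []):
        if bridge.get("class") == "bridge":
            storage.extend(walk_bridges(bridge))

print([s["product"] for s in storage])
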
2378 | diff --git a/src/lib/hwhealth/discovery/supported_vendors.py b/src/lib/hwhealth/discovery/supported_vendors.py |
2379 | index ef37a5a..459f078 100644 |
2380 | --- a/src/lib/hwhealth/discovery/supported_vendors.py |
2381 | +++ b/src/lib/hwhealth/discovery/supported_vendors.py |
2382 | @@ -2,37 +2,32 @@ |
2383 | from hwhealth import tools |
2384 | |
2385 | SUPPORTED_STORAGE = { |
2386 | - 'LSI Logic / Symbios Logic': { |
2387 | - 'SAS2308 PCI-Express Fusion-MPT SAS-2': tools.Sas2Ircu, |
2388 | - 'SAS3008 PCI-Express Fusion-MPT SAS-3': tools.Sas3Ircu, |
2389 | - 'MegaRAID SAS-3 3108 [Invader]': tools.MegaCLI, |
2390 | + "LSI Logic / Symbios Logic": { |
2391 | + "SAS2308 PCI-Express Fusion-MPT SAS-2": tools.Sas2Ircu, |
2392 | + "SAS3008 PCI-Express Fusion-MPT SAS-3": tools.Sas3Ircu, |
2393 | + "MegaRAID SAS-3 3108 [Invader]": tools.MegaCLI, |
2394 | }, |
2395 | # 'Mellanox Technologies': { |
2396 | # 'MT27710 Family [ConnectX-4 Lx]': lambda: 'mlxconfig', |
2397 | # 'MT27700 Family [ConnectX-4]': lambda: 'mlxconfig', |
2398 | # }, |
2399 | - 'Intel Corporation': { |
2400 | - 'PCIe Data Center SSD': tools.Nvme, |
2401 | + "Intel Corporation": {"PCIe Data Center SSD": tools.Nvme}, |
2402 | + "Samsung Electronics Co Ltd": { |
2403 | + "NVMe SSD Controller SM961/PM961": tools.Nvme, |
2404 | + "NVMe SSD Controller 172Xa/172Xb": tools.Nvme, |
2405 | }, |
2406 | - 'Samsung Electronics Co Ltd': { |
2407 | - 'NVMe SSD Controller SM961/PM961': tools.Nvme, |
2408 | - 'NVMe SSD Controller 172Xa/172Xb': tools.Nvme, |
2409 | - }, |
2410 | - 'Hewlett-Packard Company': { |
2411 | - 'Smart Array Gen9 Controllers': tools.SsaCli, |
2412 | - 'Smart Storage PQI 12G SAS/PCIe 3': tools.ILOrest, |
2413 | + "Hewlett-Packard Company": { |
2414 | + "Smart Array Gen9 Controllers": tools.SsaCli, |
2415 | + "Smart Storage PQI 12G SAS/PCIe 3": tools.ILOrest, |
2416 | }, |
2417 | } |
2418 | |
2419 | SUPPORTED_SYSTEMS = { |
2420 | - 'HPE': { |
2421 | + "HPE": { |
2422 | # tools.HpLog, # not sure if this works on gen10+ |
2423 | tools.ILOrest, |
2424 | }, |
2425 | - 'HP': { |
2426 | - tools.HpLog, |
2427 | - tools.SsaCli, |
2428 | - } |
2429 | + "HP": {tools.HpLog, tools.SsaCli}, |
2430 | } |
2431 | |
2432 | SUPPORTED_DRIVERS = { |
2433 | @@ -41,5 +36,5 @@ SUPPORTED_DRIVERS = { |
2434 | # 'apt': 'hioa', |
2435 | # 'tool': lambda: 'hio_info', |
2436 | # }, |
2437 | - 'nvme': tools.Nvme, |
2438 | + "nvme": tools.Nvme, |
2439 | } |
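
For context, SUPPORTED_STORAGE is consumed with a two-level dict lookup so unknown vendors or products fall through to None instead of raising. A toy version of that lookup, with placeholder classes standing in for the real hwhealth.tools classes:

# Placeholder classes; the real table maps to hwhealth.tools classes.
class Sas3Ircu:
    pass


class Nvme:
    pass


SUPPORTED_STORAGE = {
    "LSI Logic / Symbios Logic": {
        "SAS3008 PCI-Express Fusion-MPT SAS-3": Sas3Ircu,
    },
    "Intel Corporation": {"PCIe Data Center SSD": Nvme},
}

# Two-level .get() avoids KeyError for unsupported hardware.
print(SUPPORTED_STORAGE.get("Intel Corporation", {}).get("PCIe Data Center SSD"))
print(SUPPORTED_STORAGE.get("Acme", {}).get("Whatever"))   # None -> "not supported"
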
2440 | diff --git a/src/lib/hwhealth/hwdiscovery.py b/src/lib/hwhealth/hwdiscovery.py |
2441 | index c23ee13..fe27403 100644 |
2442 | --- a/src/lib/hwhealth/hwdiscovery.py |
2443 | +++ b/src/lib/hwhealth/hwdiscovery.py |
2444 | @@ -10,23 +10,23 @@ from hwhealth.discovery.lshw import Hardware |
2445 | from hwhealth.discovery.supported_vendors import ( |
2446 | SUPPORTED_STORAGE, |
2447 | SUPPORTED_DRIVERS, |
2448 | - SUPPORTED_SYSTEMS |
2449 | + SUPPORTED_SYSTEMS, |
2450 | ) |
2451 | |
2452 | from charmhelpers.core import hookenv |
2453 | |
2454 | |
2455 | -def get_tools(manufacturer='auto'): |
2456 | +def get_tools(manufacturer="auto"): |
2457 | """Return list of tool classes relevent for the current hardware. |
2458 | |
2459 | In testing, we set manufacturer = test in order to test all tools classes. |
2460 | Filtering added for known bad tools that don't work on all series combinations. |
2461 | """ |
2462 | - if manufacturer == 'test': |
2463 | + if manufacturer == "test": |
2464 | # Return all possible tools to aid testing |
2465 | - storage_tools = {tool |
2466 | - for vendor in SUPPORTED_STORAGE.values() |
2467 | - for tool in vendor.values()} |
2468 | + storage_tools = { |
2469 | + tool for vendor in SUPPORTED_STORAGE.values() for tool in vendor.values() |
2470 | + } |
2471 | # Some system vendors have multiple tools, have to iterate sets |
2472 | system_tools = set(chain.from_iterable(SUPPORTED_SYSTEMS.values())) |
2473 | driver_tools = set(SUPPORTED_DRIVERS.values()) |
2474 | @@ -35,7 +35,7 @@ def get_tools(manufacturer='auto'): |
2475 | tool for tool in all_tools if tool.is_series_supported() |
2476 | ) |
2477 | return series_filtered_tools |
2478 | - elif manufacturer == 'auto': |
2479 | + elif manufacturer == "auto": |
2480 | return _get_tools() |
2481 | else: |
2482 | raise NotImplementedError |
2483 | @@ -46,41 +46,43 @@ def _get_tools(): |
2484 | hwinfo = Hardware() |
2485 | toolset = set() |
2486 | for storage in hwinfo.get_storage_class_info: |
2487 | - vendor = storage.get('vendor') |
2488 | - product = storage.get('product') |
2489 | + vendor = storage.get("vendor") |
2490 | + product = storage.get("product") |
2491 | tool = SUPPORTED_STORAGE.get(vendor, {}).get(product) |
2492 | if isinstance(tool, list) or isinstance(tool, set): |
2493 | toolset.update(tool) |
2494 | elif tool: |
2495 | toolset.add(tool) |
2496 | else: |
2497 | - hookenv.log('Product not supported: [{}][{}]' |
2498 | - ''.format(vendor, product), hookenv.DEBUG) |
2499 | + hookenv.log( |
2500 | + "Product not supported: [{}][{}]".format(vendor, product), |
2501 | + hookenv.DEBUG, |
2502 | + ) |
2503 | |
2504 | - driver = storage.get('driver') |
2505 | + driver = storage.get("driver") |
2506 | if driver: |
2507 | if driver in SUPPORTED_DRIVERS: |
2508 | toolset.add(SUPPORTED_DRIVERS[driver]) |
2509 | continue |
2510 | - hookenv.log('Driver not supported: {}'.format(driver), |
2511 | - hookenv.DEBUG) |
2512 | + hookenv.log("Driver not supported: {}".format(driver), hookenv.DEBUG) |
2513 | |
2514 | # SW RAID? |
2515 | if _supports_mdadm(): |
2516 | toolset.add(tools.Mdadm) |
2517 | |
2518 | - if hookenv.config('enable_ipmi'): |
2519 | + if hookenv.config("enable_ipmi"): |
2520 | toolset.add(tools.Ipmi) |
2521 | |
2522 | - system_vendor = hwinfo.get_system.get('vendor') |
2523 | + system_vendor = hwinfo.get_system.get("vendor") |
2524 | tool = SUPPORTED_SYSTEMS.get(system_vendor) |
2525 | if isinstance(tool, list) or isinstance(tool, set): |
2526 | toolset.update(tool) |
2527 | elif tool: |
2528 | toolset.add(tool) |
2529 | else: |
2530 | - hookenv.log('System vendor not supported: {}'.format(system_vendor), |
2531 | - hookenv.DEBUG) |
2532 | + hookenv.log( |
2533 | + "System vendor not supported: {}".format(system_vendor), hookenv.DEBUG |
2534 | + ) |
2535 | |
2536 | executed_toolset = set([tool() for tool in toolset if tool.is_series_supported]) |
2537 | return executed_toolset |
2538 | @@ -91,27 +93,24 @@ def _supports_mdadm(): |
2539 | |
2540 | Returns True when the first one is found; otherwise, it returns False) |
2541 | """ |
2542 | - if os.path.exists('/sbin/mdadm'): |
2543 | + if os.path.exists("/sbin/mdadm"): |
2544 | try: |
2545 | - devices_raw = subprocess.check_output( |
2546 | - ['/sbin/mdadm', '--detail', '--scan'] |
2547 | - ) |
2548 | - devices_re = re.compile(r'^ARRAY\s+(\S+) ') |
2549 | + devices_raw = subprocess.check_output(["/sbin/mdadm", "--detail", "--scan"]) |
2550 | + devices_re = re.compile(r"^ARRAY\s+(\S+) ") |
2551 | for line in devices_raw.splitlines(): |
2552 | line = line.decode().strip() |
2553 | raid_dev = devices_re.search(line) |
2554 | if raid_dev: |
2555 | - hookenv.log("Found md raid array {}" |
2556 | - "".format(raid_dev.group(1))) |
2557 | + hookenv.log("Found md raid array {}".format(raid_dev.group(1))) |
2558 | return True |
2559 | except Exception as e: |
2560 | hookenv.log("mdadm scan failed with {}".format(e)) |
2561 | return False |
2562 | |
2563 | |
2564 | -if __name__ == '__main__': |
2565 | +if __name__ == "__main__": |
2566 | toolset = get_tools() |
2567 | if not toolset: |
2568 | - print('No RAID') |
2569 | + print("No RAID") |
2570 | else: |
2571 | print(toolset) |
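
_supports_mdadm above looks for `ARRAY <device>` lines in `mdadm --detail --scan` output. The sketch below applies the same regex to a captured-style sample instead of invoking mdadm; the sample output and UUIDs are invented:

import re

# Illustrative `mdadm --detail --scan` output as bytes, like check_output returns.
SCAN = b"""\
ARRAY /dev/md0 metadata=1.2 name=host:0 UUID=8d5a1d3c:0000:0000:0000
ARRAY /dev/md1 metadata=1.2 name=host:1 UUID=1f2e3d4c:0000:0000:0000
"""

devices_re = re.compile(r"^ARRAY\s+(\S+) ")
arrays = []
for line in SCAN.splitlines():
    m = devices_re.search(line.decode().strip())
    if m:
        arrays.append(m.group(1))

print(arrays)   # ['/dev/md0', '/dev/md1'] -> the mdadm tool would be enabled
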
2572 | diff --git a/src/lib/hwhealth/tools.py b/src/lib/hwhealth/tools.py |
2573 | index e90cb6b..da3615d 100644 |
2574 | --- a/src/lib/hwhealth/tools.py |
2575 | +++ b/src/lib/hwhealth/tools.py |
2576 | @@ -24,12 +24,14 @@ from charms import apt |
2577 | class JujuResourceNotFound(Exception): |
2578 | """Resource needed but not attached |
2579 | """ |
2580 | + |
2581 | pass |
2582 | |
2583 | |
2584 | class ToolError(Exception): |
2585 | """Allows a dict to be shared with try-except |
2586 | """ |
2587 | + |
2588 | def __init__(self, keymap): |
2589 | self._keymap = keymap |
2590 | |
2591 | @@ -41,16 +43,18 @@ class ToolError(Exception): |
2592 | class ToolChecksumError(ToolError): |
2593 | """Resource does not match a whitelisted checksum |
2594 | """ |
2595 | + |
2596 | pass |
2597 | |
2598 | |
2599 | class ToolNotFound(ToolError): |
2600 | """Resource found (zipfile) but does not contain a needed binary |
2601 | """ |
2602 | + |
2603 | pass |
2604 | |
2605 | |
2606 | -class Tool(): |
2607 | +class Tool: |
2608 | """An abstract class representing a "tool". |
2609 | |
2610 | The idea is to delegate install/configure duties to specific per-tool |
2611 | @@ -59,21 +63,22 @@ class Tool(): |
2612 | Every tool should implement its own internal logic regarding how to be |
2613 | installed, configured, and removed. |
2614 | """ |
2615 | - CROND_DIR = '/etc/cron.d' |
2616 | + |
2617 | + CROND_DIR = "/etc/cron.d" |
2618 | CRONJOB_SCRIPT_MODE = 0o100755 |
2619 | CRONJOB_SCRIPT_UID = 0 |
2620 | CRONJOB_SCRIPT_GID = 0 |
2621 | - CRONJOB_OUTPUT_DIR = '/var/lib/nagios' |
2622 | - NRPE_PLUGINS_DIR = '/usr/local/lib/nagios/plugins' |
2623 | + CRONJOB_OUTPUT_DIR = "/var/lib/nagios" |
2624 | + NRPE_PLUGINS_DIR = "/usr/local/lib/nagios/plugins" |
2625 | NRPE_PLUGINS_MODE = 0o100755 |
2626 | NRPE_PLUGINS_UID = 0 |
2627 | NRPE_PLUGINS_GID = 0 |
2628 | - SUDOERS_DIR = '/etc/sudoers.d' |
2629 | + SUDOERS_DIR = "/etc/sudoers.d" |
2630 | SUDOERS_MODE = 0o100440 |
2631 | SUDOERS_UID = 0 |
2632 | SUDOERS_GID = 0 |
2633 | - SUPPORTED_SERIES = ['xenial', 'bionic', 'focal'] |
2634 | - TOOLS_DIR = '/usr/local/bin' |
2635 | + SUPPORTED_SERIES = ["xenial", "bionic", "focal"] |
2636 | + TOOLS_DIR = "/usr/local/bin" |
2637 | TOOLS_MODE = 0o100755 |
2638 | TOOLS_UID = 0 |
2639 | TOOLS_GID = 0 |
2640 | @@ -81,7 +86,7 @@ class Tool(): |
2641 | def __init__( |
2642 | self, |
2643 | shortname=None, |
2644 | - nrpe_opts='', |
2645 | + nrpe_opts="", |
2646 | nrpe_script=None, |
2647 | nrpe_script_dir=None, |
2648 | cron_script=None, |
2649 | @@ -90,25 +95,21 @@ class Tool(): |
2650 | ): |
2651 | self._nagios_hostname = nrpe.get_nagios_hostname() |
2652 | self._nrpe_opts = nrpe_opts |
2653 | - self._shortname = (shortname if shortname |
2654 | - else self.__class__.__name__.lower()) |
2655 | - self._files_dir = os.path.join(hookenv.charm_dir(), |
2656 | - 'files', |
2657 | - self._shortname) |
2658 | - self._nrpe_script = (nrpe_script if nrpe_script |
2659 | - else 'check_{}.py'.format(self._shortname)) |
2660 | - self._nrpe_script_dir = (nrpe_script_dir if nrpe_script_dir |
2661 | - else self._files_dir) |
2662 | - self._cron_script = (cron_script if cron_script |
2663 | - else 'cron_{}.py'.format(self._shortname)) |
2664 | - self._cron_script_dir = (cron_script_dir if cron_script_dir |
2665 | - else self._files_dir) |
2666 | - self._templates_dir = os.path.join(hookenv.charm_dir(), |
2667 | - 'templates', |
2668 | - self._shortname) |
2669 | - self._common_libs_dir = os.path.join(hookenv.charm_dir(), |
2670 | - 'files/common') |
2671 | - self._common_libs = ['hw_health_lib.py'] |
2672 | + self._shortname = shortname if shortname else self.__class__.__name__.lower() |
2673 | + self._files_dir = os.path.join(hookenv.charm_dir(), "files", self._shortname) |
2674 | + self._nrpe_script = ( |
2675 | + nrpe_script if nrpe_script else "check_{}.py".format(self._shortname) |
2676 | + ) |
2677 | + self._nrpe_script_dir = nrpe_script_dir if nrpe_script_dir else self._files_dir |
2678 | + self._cron_script = ( |
2679 | + cron_script if cron_script else "cron_{}.py".format(self._shortname) |
2680 | + ) |
2681 | + self._cron_script_dir = cron_script_dir if cron_script_dir else self._files_dir |
2682 | + self._templates_dir = os.path.join( |
2683 | + hookenv.charm_dir(), "templates", self._shortname |
2684 | + ) |
2685 | + self._common_libs_dir = os.path.join(hookenv.charm_dir(), "files/common") |
2686 | + self._common_libs = ["hw_health_lib.py"] |
2687 | self._cron_script_args = cron_script_args |
2688 | |
2689 | def _install_nrpe_plugin(self): |
2690 | @@ -117,9 +118,9 @@ class Tool(): |
2691 | os.chmod(dst, self.NRPE_PLUGINS_MODE) |
2692 | os.chown(dst, uid=self.NRPE_PLUGINS_UID, gid=self.NRPE_PLUGINS_GID) |
2693 | hookenv.log( |
2694 | - 'NRPE script for tool [{}] installed at as {}' |
2695 | - ''.format(self._shortname, dst), |
2696 | - hookenv.DEBUG |
2697 | + "NRPE script for tool [{}] installed at as {}" |
2698 | + "".format(self._shortname, dst), |
2699 | + hookenv.DEBUG, |
2700 | ) |
2701 | return dst |
2702 | |
2703 | @@ -131,9 +132,9 @@ class Tool(): |
2704 | os.chmod(dst, self.NRPE_PLUGINS_MODE) |
2705 | os.chown(dst, uid=self.NRPE_PLUGINS_UID, gid=self.NRPE_PLUGINS_GID) |
2706 | hookenv.log( |
2707 | - 'Common Library {} for tool [{}] installed at as {}' |
2708 | - ''.format(lib, self._shortname, dst), |
2709 | - hookenv.DEBUG |
2710 | + "Common Library {} for tool [{}] installed at as {}" |
2711 | + "".format(lib, self._shortname, dst), |
2712 | + hookenv.DEBUG, |
2713 | ) |
2714 | dsts.append(dst) |
2715 | return dsts |
2716 | @@ -144,48 +145,42 @@ class Tool(): |
2717 | return |
2718 | os.remove(plugin_path) |
2719 | hookenv.log( |
2720 | - 'deleted NRPE script for tool [{}]'.format(self._shortname), |
2721 | - hookenv.DEBUG |
2722 | + "deleted NRPE script for tool [{}]".format(self._shortname), hookenv.DEBUG |
2723 | ) |
2724 | |
2725 | def configure_nrpe_check(self, nrpe_setup): |
2726 | - cmd = ' '.join([os.path.basename(self._nrpe_script), self._nrpe_opts]) |
2727 | + cmd = " ".join([os.path.basename(self._nrpe_script), self._nrpe_opts]) |
2728 | nrpe_setup.add_check( |
2729 | shortname=self._shortname, |
2730 | - description='{} Hardware Health'.format(self._shortname), |
2731 | + description="{} Hardware Health".format(self._shortname), |
2732 | check_cmd=cmd, |
2733 | ) |
2734 | hookenv.log( |
2735 | - 'configured NRPE check for tool [{}]'.format(self._shortname), |
2736 | - hookenv.DEBUG |
2737 | + "configured NRPE check for tool [{}]".format(self._shortname), hookenv.DEBUG |
2738 | ) |
2739 | |
2740 | def remove_nrpe_check(self): |
2741 | nrpe_setup = nrpe.NRPE(hostname=self._nagios_hostname, primary=False) |
2742 | - cmd = ' '.join([self._nrpe_script, self._nrpe_opts]) |
2743 | - nrpe_setup.remove_check( |
2744 | - shortname=self._shortname, |
2745 | - check_cmd=cmd |
2746 | - ) |
2747 | + cmd = " ".join([self._nrpe_script, self._nrpe_opts]) |
2748 | + nrpe_setup.remove_check(shortname=self._shortname, check_cmd=cmd) |
2749 | nrpe_setup.write() |
2750 | hookenv.log( |
2751 | - 'removed NRPE check for tool [{}]'.format(self._shortname), |
2752 | - hookenv.DEBUG |
2753 | + "removed NRPE check for tool [{}]".format(self._shortname), hookenv.DEBUG |
2754 | ) |
2755 | |
2756 | def install(self): |
2757 | self._install_common_libs() |
2758 | self._install_nrpe_plugin() |
2759 | - hookenv.log('Installed tool [{}]'.format(self._shortname)) |
2760 | + hookenv.log("Installed tool [{}]".format(self._shortname)) |
2761 | |
2762 | def remove(self): |
2763 | self.remove_nrpe_check() |
2764 | self._remove_nrpe_plugin() |
2765 | - hookenv.log('Removed tool [{}]'.format(self._shortname)) |
2766 | + hookenv.log("Removed tool [{}]".format(self._shortname)) |
2767 | |
2768 | @classmethod |
2769 | def is_series_supported(cls): |
2770 | - series = lsb_release()['DISTRIB_CODENAME'] |
2771 | + series = lsb_release()["DISTRIB_CODENAME"] |
2772 | |
2773 | # BUG(lp#1890652) The following works around xenial layer-apt bug during test |
2774 | if ( |
2775 | @@ -197,7 +192,7 @@ class Tool(): |
2776 | |
2777 | return series in cls.SUPPORTED_SERIES |
2778 | |
2779 | - def _install_cronjob(self, cron_user='root'): |
2780 | + def _install_cronjob(self, cron_user="root"): |
2781 | assert self._cron_script is not None |
2782 | |
2783 | # Copy the cronjob script to the nagios plugins directory |
2784 | @@ -206,17 +201,15 @@ class Tool(): |
2785 | os.chmod(dst, self.CRONJOB_SCRIPT_MODE) |
2786 | os.chown(dst, uid=self.CRONJOB_SCRIPT_UID, gid=self.CRONJOB_SCRIPT_GID) |
2787 | hookenv.log( |
2788 | - 'Cronjob script [{}] copied to {}' |
2789 | - ''.format(self._cron_script, self.NRPE_PLUGINS_DIR), |
2790 | - hookenv.DEBUG |
2791 | + "Cronjob script [{}] copied to {}" |
2792 | + "".format(self._cron_script, self.NRPE_PLUGINS_DIR), |
2793 | + hookenv.DEBUG, |
2794 | ) |
2795 | |
2796 | cmdline = [dst] |
2797 | - if self._cron_script_args \ |
2798 | - and isinstance(self._cron_script_args, str): |
2799 | - cmdline.extend([shlex.quote(arg) |
2800 | - for arg in self._cron_script_args.split()]) |
2801 | - elif hookenv.config('manufacturer') != 'test': |
2802 | + if self._cron_script_args and isinstance(self._cron_script_args, str): |
2803 | + cmdline.extend([shlex.quote(arg) for arg in self._cron_script_args.split()]) |
2804 | + elif hookenv.config("manufacturer") != "test": |
2805 | # Run it once to generate the temp file unless we're on a test |
2806 | # container, otherwise the nrpe check # might fail at first. |
2807 | # For security reasons, cronjobs that allow parameters shared |
2808 | @@ -227,39 +220,32 @@ class Tool(): |
2809 | # Generate random cronjob execution (internal in minutes) |
2810 | cron_interval = 5 |
2811 | minutes_offsets = [] |
2812 | - minute_num = binascii.crc_hqx( |
2813 | - ''.join(cmdline).encode(), 0) % cron_interval |
2814 | + minute_num = binascii.crc_hqx("".join(cmdline).encode(), 0) % cron_interval |
2815 | while minute_num < 60: |
2816 | minutes_offsets.append(str(minute_num)) |
2817 | minute_num += cron_interval |
2818 | - cronjob_line = '{minutes} * * * * {user} {cmd}\n'.format( |
2819 | - minutes=','.join(minutes_offsets), user=cron_user, |
2820 | - cmd=' '.join(cmdline)) |
2821 | + cronjob_line = "{minutes} * * * * {user} {cmd}\n".format( |
2822 | + minutes=",".join(minutes_offsets), user=cron_user, cmd=" ".join(cmdline) |
2823 | + ) |
2824 | |
2825 | - crond_file = os.path.join(self.CROND_DIR, |
2826 | - 'hwhealth_{}'.format(self._shortname)) |
2827 | - with open(crond_file, 'w') as crond_fd: |
2828 | + crond_file = os.path.join(self.CROND_DIR, "hwhealth_{}".format(self._shortname)) |
2829 | + with open(crond_file, "w") as crond_fd: |
2830 | crond_fd.write(cronjob_line) |
2831 | - hookenv.log( |
2832 | - 'Cronjob configured at {}'.format(crond_file), |
2833 | - hookenv.DEBUG |
2834 | - ) |
2835 | + hookenv.log("Cronjob configured at {}".format(crond_file), hookenv.DEBUG) |
2836 | return dst |
2837 | |
2838 | def _remove_cronjob(self): |
2839 | assert self._cron_script is not None |
2840 | |
2841 | - crond_file = os.path.join(self.CROND_DIR, |
2842 | - 'hwhealth_{}'.format(self._shortname)) |
2843 | + crond_file = os.path.join(self.CROND_DIR, "hwhealth_{}".format(self._shortname)) |
2844 | cron_script = os.path.join(self.NRPE_PLUGINS_DIR, self._cron_script) |
2845 | for filename in (crond_file, cron_script): |
2846 | if not os.path.exists(filename): |
2847 | continue |
2848 | os.remove(filename) |
2849 | hookenv.log( |
2850 | - 'Removed cronjob files [{}, {}]' |
2851 | - ''.format(crond_file, cron_script), |
2852 | - hookenv.DEBUG |
2853 | + "Removed cronjob files [{}, {}]".format(crond_file, cron_script), |
2854 | + hookenv.DEBUG, |
2855 | ) |
2856 | |
2857 | def _remove_sudoer(self): |
2858 | @@ -267,8 +253,7 @@ class Tool(): |
2859 | if not sudoer_path.exists(): |
2860 | return |
2861 | sudoer_path.unlink() |
2862 | - hookenv.log('deleted sudoer file: {}'.format(sudoer_path), |
2863 | - hookenv.DEBUG) |
2864 | + hookenv.log("deleted sudoer file: {}".format(sudoer_path), hookenv.DEBUG) |
2865 | |
2866 | |
2867 | class VendorTool(Tool): |
2868 | @@ -279,6 +264,7 @@ class VendorTool(Tool): |
2869 | cronjob that runs as root and saves the tool output in a temporary file |
2870 | that nrpe can read (as nagios user). |
2871 | """ |
2872 | + |
2873 | def __init__(self, *args, **kwargs): |
2874 | super().__init__(*args, **kwargs) |
2875 | self.checksums = [] |
2876 | @@ -294,30 +280,32 @@ class VendorTool(Tool): |
2877 | super().remove() |
2878 | |
2879 | def _install_from_resource(self): |
2880 | - resource = hookenv.resource_get('tools') |
2881 | + resource = hookenv.resource_get("tools") |
2882 | if not resource: |
2883 | - raise JujuResourceNotFound('tools') |
2884 | + raise JujuResourceNotFound("tools") |
2885 | else: |
2886 | hookenv.log( |
2887 | - 'Installing tool [{}] from resource'.format(self._shortname), |
2888 | - hookenv.DEBUG |
2889 | + "Installing tool [{}] from resource".format(self._shortname), |
2890 | + hookenv.DEBUG, |
2891 | ) |
2892 | # Move in from a temp directory to be atomic |
2893 | with TemporaryDirectory() as tmpdir: |
2894 | try: |
2895 | - with ZipFile(resource, 'r') as zipfile: |
2896 | + with ZipFile(resource, "r") as zipfile: |
2897 | tmpfile = zipfile.extract(self._shortname, tmpdir) |
2898 | # Verify checksum |
2899 | checksum = hashlib.sha256() |
2900 | - with open(tmpfile, 'rb') as fd: |
2901 | + with open(tmpfile, "rb") as fd: |
2902 | checksum.update(fd.read()) |
2903 | if checksum.hexdigest() not in self.checksums: |
2904 | - checksums_string = ', '.join(self.checksums) |
2905 | - raise ToolChecksumError({ |
2906 | - 'shortname': self._shortname, |
2907 | - 'checksum': checksum.hexdigest(), |
2908 | - 'expected_checksums': checksums_string |
2909 | - }) |
2910 | + checksums_string = ", ".join(self.checksums) |
2911 | + raise ToolChecksumError( |
2912 | + { |
2913 | + "shortname": self._shortname, |
2914 | + "checksum": checksum.hexdigest(), |
2915 | + "expected_checksums": checksums_string, |
2916 | + } |
2917 | + ) |
2918 | # We could just use self.TOOLS_DIR as a destination |
2919 | # here, but shutil.move refuses to overwrite the |
2920 | # destination file unless it receives a full path |
2921 | @@ -327,28 +315,24 @@ class VendorTool(Tool): |
2922 | os.chown(dst, uid=self.TOOLS_UID, gid=self.TOOLS_GID) |
2923 | |
2924 | except BadZipFile as error: |
2925 | - hookenv.log('BadZipFile: {}'.format(error), hookenv.ERROR) |
2926 | + hookenv.log("BadZipFile: {}".format(error), hookenv.ERROR) |
2927 | |
2928 | except PermissionError as error: |
2929 | hookenv.log( |
2930 | - 'Unable to unzip tool {} ' |
2931 | - 'from the provided resource: {}' |
2932 | - ''.format(self._shortname, error), |
2933 | - hookenv.ERROR |
2934 | + "Unable to unzip tool {} " |
2935 | + "from the provided resource: {}" |
2936 | + "".format(self._shortname, error), |
2937 | + hookenv.ERROR, |
2938 | ) |
2939 | except KeyError as error: |
2940 | - raise ToolNotFound({'shortname': self._shortname, |
2941 | - 'error': error}) |
2942 | + raise ToolNotFound({"shortname": self._shortname, "error": error}) |
2943 | |
2944 | def _remove_binary(self): |
2945 | binary_path = Path(self.TOOLS_DIR) / self._shortname |
2946 | if not binary_path.exists(): |
2947 | return |
2948 | binary_path.unlink() |
2949 | - hookenv.log( |
2950 | - 'Removed binary tool {}'.format(binary_path), |
2951 | - hookenv.DEBUG |
2952 | - ) |
2953 | + hookenv.log("Removed binary tool {}".format(binary_path), hookenv.DEBUG) |
2954 | |
2955 | |
2956 | class AptVendorTool(Tool): |
2957 | @@ -402,51 +386,42 @@ class AptVendorTool(Tool): |
2958 | -----END PGP PUBLIC KEY BLOCK-----""" |
2959 | |
2960 | HPE_MCP_REPO_TMPL = ( |
2961 | - 'deb http://downloads.linux.hpe.com/SDR/repo/mcp {series}/current-gen9 non-free' |
2962 | + "deb http://downloads.linux.hpe.com/SDR/repo/mcp {series}/current-gen9 non-free" |
2963 | ) |
2964 | HPE_ILOREST_REPO_TMPL = ( |
2965 | - 'deb http://downloads.linux.hpe.com/SDR/repo/ilorest {series}/current non-free' |
2966 | + "deb http://downloads.linux.hpe.com/SDR/repo/ilorest {series}/current non-free" |
2967 | ) |
2968 | |
2969 | # HP doesn't have focal APT sources as of yet |
2970 | APT_SOURCES = { |
2971 | - 'ssacli': { |
2972 | - 'xenial': HPE_MCP_REPO_TMPL.format(series='xenial'), |
2973 | - 'bionic': HPE_MCP_REPO_TMPL.format(series='bionic'), |
2974 | + "ssacli": { |
2975 | + "xenial": HPE_MCP_REPO_TMPL.format(series="xenial"), |
2976 | + "bionic": HPE_MCP_REPO_TMPL.format(series="bionic"), |
2977 | }, |
2978 | - 'ilorest': { |
2979 | - 'xenial': HPE_ILOREST_REPO_TMPL.format(series='xenial'), |
2980 | - 'bionic': HPE_ILOREST_REPO_TMPL.format(series='bionic'), |
2981 | + "ilorest": { |
2982 | + "xenial": HPE_ILOREST_REPO_TMPL.format(series="xenial"), |
2983 | + "bionic": HPE_ILOREST_REPO_TMPL.format(series="bionic"), |
2984 | }, |
2985 | - 'hplog': { |
2986 | - 'xenial': HPE_MCP_REPO_TMPL.format(series='xenial'), |
2987 | - 'bionic': HPE_MCP_REPO_TMPL.format(series='bionic'), |
2988 | + "hplog": { |
2989 | + "xenial": HPE_MCP_REPO_TMPL.format(series="xenial"), |
2990 | + "bionic": HPE_MCP_REPO_TMPL.format(series="bionic"), |
2991 | }, |
2992 | } |
2993 | APT_KEYS = { |
2994 | - 'ssacli': { |
2995 | - 'xenial': HPE_MCP_KEY, |
2996 | - 'bionic': HPE_MCP_KEY, |
2997 | - }, |
2998 | - 'ilorest': { |
2999 | - 'xenial': HPE_ILOREST_KEY, |
3000 | - 'bionic': HPE_ILOREST_KEY, |
3001 | - }, |
3002 | - 'hplog': { |
3003 | - 'xenial': HPE_MCP_KEY, |
3004 | - 'bionic': HPE_MCP_KEY, |
3005 | - }, |
3006 | + "ssacli": {"xenial": HPE_MCP_KEY, "bionic": HPE_MCP_KEY}, |
3007 | + "ilorest": {"xenial": HPE_ILOREST_KEY, "bionic": HPE_ILOREST_KEY}, |
3008 | + "hplog": {"xenial": HPE_MCP_KEY, "bionic": HPE_MCP_KEY}, |
3009 | } |
3010 | |
3011 | def __init__(self, shortname=None, apt_packages=[]): |
3012 | super().__init__( |
3013 | shortname=shortname, |
3014 | - nrpe_script='check_hw_health_cron_output.py', |
3015 | - nrpe_opts='--filename {}/{}.out'.format(self.CRONJOB_OUTPUT_DIR, |
3016 | - shortname) |
3017 | + nrpe_script="check_hw_health_cron_output.py", |
3018 | + nrpe_opts="--filename {}/{}.out".format(self.CRONJOB_OUTPUT_DIR, shortname), |
3019 | + ) |
3020 | + self.apt_packages = ( |
3021 | + apt_packages if apt_packages else [self.__class__.__name__.lower()] |
3022 | ) |
3023 | - self.apt_packages = (apt_packages if apt_packages |
3024 | - else [self.__class__.__name__.lower()]) |
3025 | self._nrpe_script_dir = self._common_libs_dir |
3026 | |
3027 | def install(self): |
3028 | @@ -471,38 +446,45 @@ class AptVendorTool(Tool): |
3029 | hardware present on the system. |
3030 | """ |
3031 | self._add_apt_source() |
3032 | - if hookenv.config('manufacturer') == 'test': |
3033 | + if hookenv.config("manufacturer") == "test": |
3034 | # If we are forcing install on a container for functional tests, |
3035 | # we should only download and not install the packages, as some |
3036 |             # vendor tools depend on hardware to be present to complete postinst
3037 | # need one option added per package |
3038 | - apt.queue_install(self.apt_packages, options=["--download-only" for _ in self.apt_packages]) |
3039 | + apt.queue_install( |
3040 | + self.apt_packages, |
3041 | + options=["--download-only" for _ in self.apt_packages], |
3042 | + ) |
3043 | else: |
3044 | apt.queue_install(self.apt_packages) |
3045 | |
3046 | def _add_apt_source(self): |
3047 | - series = lsb_release()['DISTRIB_CODENAME'] |
3048 | - if self._shortname not in self.APT_SOURCES and self._shortname not in self.APT_KEYS: |
3049 | + series = lsb_release()["DISTRIB_CODENAME"] |
3050 | + if ( |
3051 | + self._shortname not in self.APT_SOURCES |
3052 | + and self._shortname not in self.APT_KEYS |
3053 | + ): |
3054 | return |
3055 | if series in self.APT_SOURCES[self._shortname]: |
3056 | - apt.add_source(self.APT_SOURCES[self._shortname][series], |
3057 | - key=self.APT_KEYS[self._shortname][series]) |
3058 | + apt.add_source( |
3059 | + self.APT_SOURCES[self._shortname][series], |
3060 | + key=self.APT_KEYS[self._shortname][series], |
3061 | + ) |
3062 | |
3063 | def _remove_packages(self): |
3064 | apt.purge(self.apt_packages) |
3065 | |
3066 | def install_cronjob(self): |
3067 | hookenv.log( |
3068 | - 'Attempting AptVendorTool cronjob script install [{}]' |
3069 | - ''.format(self._cron_script), |
3070 | - hookenv.DEBUG |
3071 | + "Attempting AptVendorTool cronjob script install [{}]" |
3072 | + "".format(self._cron_script), |
3073 | + hookenv.DEBUG, |
3074 | ) |
3075 | # Don't install a cronjob until the tools are installed |
3076 | if self.is_apt_installed(): |
3077 | hookenv.log( |
3078 | - 'calling _install_cronjob for {}' |
3079 | - ''.format(self._cron_script), |
3080 | - hookenv.DEBUG |
3081 | + "calling _install_cronjob for {}".format(self._cron_script), |
3082 | + hookenv.DEBUG, |
3083 | ) |
3084 | self._install_cronjob() |
3085 | |
3086 | @@ -521,7 +503,7 @@ class AptVendorTool(Tool): |
3087 | allows the reactive layer to know when the apt packages have finished |
3088 | installing so that it may go on to configure the nrpe layer of tools |
3089 | """ |
3090 | - if hookenv.config('manufacturer') == 'test': |
3091 | + if hookenv.config("manufacturer") == "test": |
3092 | # it is okay to skip this part in testing as layer-apt will block |
3093 | # if the package is not downloadable from sources |
3094 | return True |
3095 | @@ -537,11 +519,12 @@ class Sas3Ircu(VendorTool): |
3096 | |
3097 | This is a tool supporting the LSI SAS 12Gb/s controllers |
3098 | """ |
3099 | + |
3100 | def __init__(self): |
3101 | - super().__init__(cron_script='cron_sas3ircu.sh') |
3102 | + super().__init__(cron_script="cron_sas3ircu.sh") |
3103 | self.checksums = [ |
3104 | - 'f150eb37bb332668949a3eccf9636e0e03f874aecd17a39d586082c6be1386bd', |
3105 | - 'd69967057992134df1b136f83bc775a641e32c4efc741def3ef6f6a25a9a14b5', |
3106 | + "f150eb37bb332668949a3eccf9636e0e03f874aecd17a39d586082c6be1386bd", |
3107 | + "d69967057992134df1b136f83bc775a641e32c4efc741def3ef6f6a25a9a14b5", |
3108 | ] |
3109 | |
3110 | |
3111 | @@ -550,9 +533,12 @@ class Sas2Ircu(VendorTool): |
3112 | |
3113 | This is a tool supporting the LSI SAS 6Gb/s controllers |
3114 | """ |
3115 | + |
3116 | def __init__(self): |
3117 | - super().__init__(cron_script='cron_sas2ircu.sh') |
3118 | - self.checksums = ['37467826d0b22aad47287efe70bb34e47f475d70e9b1b64cbd63f57607701e73'] # noqa: E501 |
3119 | + super().__init__(cron_script="cron_sas2ircu.sh") |
3120 | + self.checksums = [ |
3121 | + "37467826d0b22aad47287efe70bb34e47f475d70e9b1b64cbd63f57607701e73" |
3122 | + ] # noqa: E501 |
3123 | |
3124 | |
3125 | class MegaCLI(VendorTool): |
3126 | @@ -560,12 +546,13 @@ class MegaCLI(VendorTool): |
3127 | |
3128 | This is a tool supporting the LSI MegaRAID SAS controllers |
3129 | """ |
3130 | + |
3131 | def __init__(self): |
3132 | - super().__init__(cron_script='cron_megacli.sh') |
3133 | + super().__init__(cron_script="cron_megacli.sh") |
3134 | self.checksums = [ |
3135 | - '34f1a235543662615ee35f458317380b3f89fac0e415dee755e0dbc7c4cf6f92', |
3136 | - '1c4effe33ee5db82227e05925dd629771fd49c7d2be2382d48c48a864452cdec', |
3137 | - '1a68e6646d1e3dfb7039f581be994500d0ed02de2f928e57399e86473d4c8662', |
3138 | + "34f1a235543662615ee35f458317380b3f89fac0e415dee755e0dbc7c4cf6f92", |
3139 | + "1c4effe33ee5db82227e05925dd629771fd49c7d2be2382d48c48a864452cdec", |
3140 | + "1a68e6646d1e3dfb7039f581be994500d0ed02de2f928e57399e86473d4c8662", |
3141 | ] |
3142 | |
3143 | |
3144 | @@ -575,10 +562,10 @@ class HpLog(AptVendorTool): |
3145 | This is a tool supporting the LSI MegaRAID SAS controllers |
3146 | """ |
3147 | |
3148 | - SUPPORTED_SERIES = ['xenial', 'bionic'] |
3149 | + SUPPORTED_SERIES = ["xenial", "bionic"] |
3150 | |
3151 | def __init__(self): |
3152 | - super().__init__(apt_packages=['hp-health']) |
3153 | + super().__init__(apt_packages=["hp-health"]) |
3154 | |
3155 | |
3156 | class SsaCli(AptVendorTool): |
3157 | @@ -587,7 +574,7 @@ class SsaCli(AptVendorTool): |
3158 | This is a tool supporting the HP Smart Array controllers |
3159 | """ |
3160 | |
3161 | - SUPPORTED_SERIES = ['xenial', 'bionic'] |
3162 | + SUPPORTED_SERIES = ["xenial", "bionic"] |
3163 | |
3164 | def __init__(self): |
3165 | super().__init__() |
3166 | @@ -597,7 +584,7 @@ class ILOrest(AptVendorTool): |
3167 | """A class representing the ILOrest vendor tool (HPE hardware (Gen 10+) |
3168 | """ |
3169 | |
3170 | - SUPPORTED_SERIES = ['xenial', 'bionic'] |
3171 | + SUPPORTED_SERIES = ["xenial", "bionic"] |
3172 | |
3173 | def __init__(self): |
3174 | super().__init__() |
3175 | @@ -609,12 +596,13 @@ class Mdadm(VendorTool): |
3176 | Our mdadm check kind of behaves like a VendorTool for the purpose of |
3177 | installation as it has a cronjob + check script |
3178 | """ |
3179 | + |
3180 | def __init__(self): |
3181 | super().__init__() |
3182 | |
3183 | def install(self): |
3184 | # mdadm should already be installed, but let's check |
3185 | - fetch.apt_install(['mdadm'], fatal=True) |
3186 | + fetch.apt_install(["mdadm"], fatal=True) |
3187 | self._install_cronjob() |
3188 | # No vendor binary to install |
3189 | Tool.install(self) |
3190 | @@ -634,12 +622,13 @@ class Ipmi(Tool): |
3191 | install; the plugin relies only on freeipmi, a few perl modules, and the |
3192 | actual nrpe check, which is imported as a git submodule |
3193 | """ |
3194 | - def __init__(self, nrpe_opts=''): |
3195 | + |
3196 | + def __init__(self, nrpe_opts=""): |
3197 | super().__init__( |
3198 | - cron_script='cron_ipmi_sensors.py', |
3199 | - cron_script_args=hookenv.config('ipmi_check_options') |
3200 | + cron_script="cron_ipmi_sensors.py", |
3201 | + cron_script_args=hookenv.config("ipmi_check_options"), |
3202 | ) |
3203 | - self._sudoer_file = '99-check_ipmi_sensor' |
3204 | + self._sudoer_file = "99-check_ipmi_sensor" |
3205 | |
3206 | def configure_nrpe_check(self, nrpe_setup): |
3207 | # extra options for check_ipmi_sensors Perl script are configured in |
3208 | @@ -652,7 +641,7 @@ class Ipmi(Tool): |
3209 | self._install_sudoer() |
3210 | # Install Perl script called by the (Python) cronjob |
3211 | self._install_nrpe_helper_plugin() |
3212 | - self._install_cronjob(cron_user='nagios') |
3213 | + self._install_cronjob(cron_user="nagios") |
3214 | |
3215 | # Install the Python script called by check_nrpe |
3216 | super().install() |
3217 | @@ -665,13 +654,13 @@ class Ipmi(Tool): |
3218 | |
3219 | def _install_nrpe_helper_plugin(self): |
3220 | original_nrpe_script = self._nrpe_script |
3221 | - self._nrpe_script = 'check_ipmi_sensor' |
3222 | + self._nrpe_script = "check_ipmi_sensor" |
3223 | super()._install_nrpe_plugin() |
3224 | self._nrpe_script = original_nrpe_script |
3225 | |
3226 | def _remove_nrpe_helper_plugin(self): |
3227 | original_nrpe_script = self._nrpe_script |
3228 | - self._nrpe_script = 'check_ipmi_sensor' |
3229 | + self._nrpe_script = "check_ipmi_sensor" |
3230 | super()._remove_nrpe_plugin() |
3231 | self._nrpe_script = original_nrpe_script |
3232 | |
3233 | @@ -683,9 +672,8 @@ class Ipmi(Tool): |
3234 | os.chmod(dst, self.SUDOERS_MODE) |
3235 | os.chown(dst, uid=self.SUDOERS_UID, gid=self.SUDOERS_GID) |
3236 | hookenv.log( |
3237 | - 'sudoer file for tool [{}] installed at {}' |
3238 | - ''.format(self._shortname, dst), |
3239 | - hookenv.DEBUG |
3240 | + "sudoer file for tool [{}] installed at {}".format(self._shortname, dst), |
3241 | + hookenv.DEBUG, |
3242 | ) |
3243 | return dst |
3244 | |
3245 | @@ -696,15 +684,16 @@ class Nvme(Tool): |
3246 | This is a direct subclass of Tool because unlike a VendorTool we are not |
3247 | using a cronjob script |
3248 | """ |
3249 | + |
3250 | def __init__(self): |
3251 | super().__init__() |
3252 | - self._sudoer_template = '99-check_nvme.tmpl' |
3253 | - self._sudoer_file = '99-check_nvme' |
3254 | + self._sudoer_template = "99-check_nvme.tmpl" |
3255 | + self._sudoer_file = "99-check_nvme" |
3256 | self._cron_script = None |
3257 | |
3258 | def install(self): |
3259 | # mdadm should already be installed, but let's check |
3260 | - fetch.apt_install(['nvme-cli'], fatal=True) |
3261 | + fetch.apt_install(["nvme-cli"], fatal=True) |
3262 | self._render_sudoer() |
3263 | super().install() |
3264 | |
3265 | @@ -722,24 +711,29 @@ class Nvme(Tool): |
3266 | if not devices: |
3267 | return |
3268 | |
3269 | - devices = dict([('CHECK{}'.format(dev.replace('/', '_').upper()), dev) |
3270 | - for dev in devices]) |
3271 | - ctxt = {'devices': devices} |
3272 | - ctxt['devices_cmnd_aliases'] = ', '.join(devices.keys()) |
3273 | - |
3274 | - render(source=src, target=dst, context=ctxt, perms=self.SUDOERS_MODE, |
3275 | - templates_dir=None) |
3276 | + devices = dict( |
3277 | + [("CHECK{}".format(dev.replace("/", "_").upper()), dev) for dev in devices] |
3278 | + ) |
3279 | + ctxt = {"devices": devices} |
3280 | + ctxt["devices_cmnd_aliases"] = ", ".join(devices.keys()) |
3281 | + |
3282 | + render( |
3283 | + source=src, |
3284 | + target=dst, |
3285 | + context=ctxt, |
3286 | + perms=self.SUDOERS_MODE, |
3287 | + templates_dir=None, |
3288 | + ) |
3289 | hookenv.log( |
3290 | - 'sudoer file for tool [{}] installed at {}'.format(self._shortname, |
3291 | - dst), |
3292 | - hookenv.DEBUG |
3293 | + "sudoer file for tool [{}] installed at {}".format(self._shortname, dst), |
3294 | + hookenv.DEBUG, |
3295 | ) |
3296 | return dst |
3297 | |
3298 | def __get_nvme_devices(self): |
3299 | devices = [] |
3300 | - for device in glob.glob('/dev/nvme*'): |
3301 | - nvme_re = re.match(r'^/dev/nvme\d+$', device) |
3302 | + for device in glob.glob("/dev/nvme*"): |
3303 | + nvme_re = re.match(r"^/dev/nvme\d+$", device) |
3304 | if not nvme_re: |
3305 | continue |
3306 | devices.append(nvme_re.group()) |
3307 | diff --git a/src/reactive/hw_health.py b/src/reactive/hw_health.py |
3308 | index a04f790..ff72e85 100644 |
3309 | --- a/src/reactive/hw_health.py |
3310 | +++ b/src/reactive/hw_health.py |
3311 | @@ -12,167 +12,176 @@ from hwhealth import tools |
3312 | |
3313 | def _set_install_status(tool): |
3314 | if isinstance(tool, tools.VendorTool) and not isinstance(tool, tools.Mdadm): |
3315 | - status.maintenance('Installing from attached resource') |
3316 | + status.maintenance("Installing from attached resource") |
3317 | elif isinstance(tool, tools.AptVendorTool): |
3318 | - status.maintenance('Installing vendor tools via apt') |
3319 | - set_flag('hw-health.wait-for-vendor-apt') |
3320 | + status.maintenance("Installing vendor tools via apt") |
3321 | + set_flag("hw-health.wait-for-vendor-apt") |
3322 | |
3323 | |
3324 | -@when_none('hw-health.installed', 'hw-health.unsupported') |
3325 | -@when('nrpe-external-master.available') |
3326 | -@when('general-info.connected') |
3327 | +@when_none("hw-health.installed", "hw-health.unsupported") |
3328 | +@when("nrpe-external-master.available") |
3329 | +@when("general-info.connected") |
3330 | def install(): |
3331 | - manufacturer = hookenv.config('manufacturer') |
3332 | - if host.is_container() and manufacturer != 'test': |
3333 | - status.blocked('Containers are not supported') |
3334 | - set_flag('hw-health.unsupported') |
3335 | + manufacturer = hookenv.config("manufacturer") |
3336 | + if host.is_container() and manufacturer != "test": |
3337 | + status.blocked("Containers are not supported") |
3338 | + set_flag("hw-health.unsupported") |
3339 | return |
3340 | |
3341 | - if manufacturer not in ['auto', 'test']: |
3342 | - status.blocked('manufacturer needs to be set to auto') |
3343 | + if manufacturer not in ["auto", "test"]: |
3344 | + status.blocked("manufacturer needs to be set to auto") |
3345 | return |
3346 | |
3347 | # Detect hardware and return a list of tools we need to use |
3348 | - status.maintenance('Autodiscovering hardware') |
3349 | + status.maintenance("Autodiscovering hardware") |
3350 | toolset = get_tools(manufacturer) |
3351 | if not toolset: |
3352 | - status.blocked('Hardware not supported') |
3353 | - set_flag('hw-health.unsupported') |
3354 | + status.blocked("Hardware not supported") |
3355 | + set_flag("hw-health.unsupported") |
3356 | else: |
3357 | try: |
3358 | tool_list = list() |
3359 | for toolClass in toolset: |
3360 | tool = toolClass() |
3361 | _set_install_status(tool) |
3362 | - status.maintenance('Installing tool {}'.format(type(tool).__name__)) |
3363 | + status.maintenance("Installing tool {}".format(type(tool).__name__)) |
3364 | tool.install() |
3365 | # Save the class name in the unit kv db. This will be reused when |
3366 | # reconfiguring or removing the checks |
3367 | tool_list.append(type(tool).__name__) |
3368 | unitdb = unitdata.kv() |
3369 | - unitdb.set('toolset', tool_list) |
3370 | - set_flag('hw-health.installed') |
3371 | + unitdb.set("toolset", tool_list) |
3372 | + set_flag("hw-health.installed") |
3373 | except tools.JujuResourceNotFound as error: |
3374 | - hookenv.log('Missing Juju resource: {} - alternative method is not ' |
3375 | - ' available yet'.format(error), hookenv.ERROR) |
3376 | - status.blocked('Missing Juju resource: {}'.format(error)) |
3377 | - set_flag('hw-health.unsupported') |
3378 | + hookenv.log( |
3379 | + "Missing Juju resource: {} - alternative method is not " |
3380 | + " available yet".format(error), |
3381 | + hookenv.ERROR, |
3382 | + ) |
3383 | + status.blocked("Missing Juju resource: {}".format(error)) |
3384 | + set_flag("hw-health.unsupported") |
3385 | except tools.ToolChecksumError as error: |
3386 | msg = error.message |
3387 | - hookenv.log('checksum error: tool [{shortname}], checksum[{checksum}],' |
3388 | - ' expected[{expected_checksums}]'.format(**msg), hookenv.ERROR) |
3389 | - status.blocked('Tool {shortname} - checksum error'.format(**msg)) |
3390 | - set_flag('hw-health.unsupported') |
3391 | + hookenv.log( |
3392 | + "checksum error: tool [{shortname}], checksum[{checksum}]," |
3393 | + " expected[{expected_checksums}]".format(**msg), |
3394 | + hookenv.ERROR, |
3395 | + ) |
3396 | + status.blocked("Tool {shortname} - checksum error".format(**msg)) |
3397 | + set_flag("hw-health.unsupported") |
3398 | except tools.ToolNotFound as error: |
3399 | msg = error.message |
3400 | - hookenv.log('Tool {shortname} not found in the provided resource: ' |
3401 | - '{error}'.format(**msg), hookenv.ERROR) |
3402 | - status.blocked('Tool {shortname} not found'.format(**msg)) |
3403 | - set_flag('hw-health.unsupported') |
3404 | + hookenv.log( |
3405 | + "Tool {shortname} not found in the provided resource: " |
3406 | + "{error}".format(**msg), |
3407 | + hookenv.ERROR, |
3408 | + ) |
3409 | + status.blocked("Tool {shortname} not found".format(**msg)) |
3410 | + set_flag("hw-health.unsupported") |
3411 | |
3412 | |
3413 | -@hook('upgrade-charm') |
3414 | +@hook("upgrade-charm") |
3415 | def upgrade(): |
3416 | - clear_flag('hw-health.installed') |
3417 | - clear_flag('hw-health.unsupported') |
3418 | - clear_flag('hw-health.configured') |
3419 | - status.maintenance('Charm upgrade in progress') |
3420 | + clear_flag("hw-health.installed") |
3421 | + clear_flag("hw-health.unsupported") |
3422 | + clear_flag("hw-health.configured") |
3423 | + status.maintenance("Charm upgrade in progress") |
3424 | |
3425 | |
3426 | -@when('hw-health.installed') |
3427 | -@when_not('general-info.available') |
3428 | +@when("hw-health.installed") |
3429 | +@when_not("general-info.available") |
3430 | def remove_tools(): |
3431 | # If general-info is unavailable, the subordinate relationship towards the |
3432 | # principal charm has been broken, so we need to remove the installed tools |
3433 | unitdb = unitdata.kv() |
3434 | - for tool_class_name in unitdb.get('toolset', set()): |
3435 | + for tool_class_name in unitdb.get("toolset", set()): |
3436 | # Re-instantiate the tool from the saved class name |
3437 | tool_class = getattr(tools, tool_class_name) |
3438 | tool_class().remove() |
3439 | - clear_flag('hw-health.installed') |
3440 | - clear_flag('hw-health.unsupported') |
3441 | - clear_flag('hw-health.configured') |
3442 | + clear_flag("hw-health.installed") |
3443 | + clear_flag("hw-health.unsupported") |
3444 | + clear_flag("hw-health.configured") |
3445 | |
3446 | |
3447 | -@when('hw-health.wait-for-vendor-apt') |
3448 | +@when("hw-health.wait-for-vendor-apt") |
3449 | def wait_for_vendor_apt(): |
3450 | # cycle through any vendor tools that are of type AptVendorTool and |
3451 | # check if all packages needed are installed. If not, eject and wait |
3452 | unitdb = unitdata.kv() |
3453 | - for tool_class_name in unitdb.get('toolset', set()): |
3454 | + for tool_class_name in unitdb.get("toolset", set()): |
3455 | # Re-instantiate the tool from the saved class name |
3456 | tool_class = getattr(tools, tool_class_name) |
3457 | if isinstance(tool_class, tools.AptVendorTool): |
3458 | if tool_class.is_apt_installed(): |
3459 | tool_class.install_cronjob() |
3460 | else: |
3461 | - status.maintenance('Waiting for vendor tools to install via apt') |
3462 | + status.maintenance("Waiting for vendor tools to install via apt") |
3463 | return |
3464 | - clear_flag('hw-health.wait-for-vendor-apt') |
3465 | - clear_flag('hw-health.configured') |
3466 | + clear_flag("hw-health.wait-for-vendor-apt") |
3467 | + clear_flag("hw-health.configured") |
3468 | |
3469 | |
3470 | -@when('config.changed') |
3471 | -@when_not('config.changed.manufacturer') |
3472 | +@when("config.changed") |
3473 | +@when_not("config.changed.manufacturer") |
3474 | def config_changed(): |
3475 | - clear_flag('hw-health.configured') |
3476 | + clear_flag("hw-health.configured") |
3477 | |
3478 | |
3479 | -@when('config.changed.manufacturer') |
3480 | +@when("config.changed.manufacturer") |
3481 | def toolset_changed(): |
3482 | - if not is_flag_set('hw-health.installed'): |
3483 | + if not is_flag_set("hw-health.installed"): |
3484 | # Note(aluria): useful for testing purposes |
3485 | - clear_flag('hw-health.unsupported') |
3486 | + clear_flag("hw-health.unsupported") |
3487 | return |
3488 | |
3489 | # Changing the manufacturer option will trigger a reinstallation of the |
3490 | # tools |
3491 | remove_tools() |
3492 | - status.maintenance('Reinstallation of tools in progress') |
3493 | + status.maintenance("Reinstallation of tools in progress") |
3494 | |
3495 | |
3496 | -@when('hw-health.installed') |
3497 | -@when_not('nrpe-external-master.available') |
3498 | -@when_not('hw-health.configured') |
3499 | +@when("hw-health.installed") |
3500 | +@when_not("nrpe-external-master.available") |
3501 | +@when_not("hw-health.configured") |
3502 | def blocked_on_nrpe(): |
3503 | - status.blocked('Missing relations: nrpe-external-master') |
3504 | + status.blocked("Missing relations: nrpe-external-master") |
3505 | |
3506 | |
3507 | -@when('hw-health.installed') |
3508 | -@when('nrpe-external-master.available') |
3509 | -@when_not('hw-health.configured') |
3510 | +@when("hw-health.installed") |
3511 | +@when("nrpe-external-master.available") |
3512 | +@when_not("hw-health.configured") |
3513 | def configure_nrpe(): |
3514 | - if not os.path.exists('/var/lib/nagios'): |
3515 | - status.waiting('Waiting for nrpe package installation') |
3516 | + if not os.path.exists("/var/lib/nagios"): |
3517 | + status.waiting("Waiting for nrpe package installation") |
3518 | return |
3519 | |
3520 | - status.maintenance('Configuring nrpe checks') |
3521 | + status.maintenance("Configuring nrpe checks") |
3522 | |
3523 | nrpe_setup = nrpe.NRPE(primary=False) |
3524 | unitdb = unitdata.kv() |
3525 | - for tool_class_name in unitdb.get('toolset', set()): |
3526 | + for tool_class_name in unitdb.get("toolset", set()): |
3527 | # Re-instantiate the tool from the saved class name |
3528 | tool_class = getattr(tools, tool_class_name) |
3529 | tool_class().configure_nrpe_check(nrpe_setup) |
3530 | |
3531 | - if unitdb.get('toolset'): |
3532 | + if unitdb.get("toolset"): |
3533 | # Note(aluria): This needs to be run outside of |
3534 | # tool_class().configure_nrpe_check or shared dictionary with the |
3535 | # nagios unit will list the last added check (LP#1821602) |
3536 | nrpe_setup.write() |
3537 | |
3538 | - status.active('ready') |
3539 | - set_flag('hw-health.configured') |
3540 | + status.active("ready") |
3541 | + set_flag("hw-health.configured") |
3542 | |
3543 | |
3544 | -@when('hw-health.installed') |
3545 | -@when_not('nrpe-external-master.available') |
3546 | -@when('hw-health.configured') |
3547 | +@when("hw-health.installed") |
3548 | +@when_not("nrpe-external-master.available") |
3549 | +@when("hw-health.configured") |
3550 | def remove_nrpe_checks(): |
3551 | - status.maintenance('Removing nrpe checks') |
3552 | + status.maintenance("Removing nrpe checks") |
3553 | unitdb = unitdata.kv() |
3554 | - for tool_class_name in unitdb.get('toolset', set()): |
3555 | + for tool_class_name in unitdb.get("toolset", set()): |
3556 | # Re-instantiate the tool from the saved class name |
3557 | tool_class = getattr(tools, tool_class_name) |
3558 | tool_class().remove_nrpe_check() |
3559 | - clear_flag('hw-health.configured') |
3560 | + clear_flag("hw-health.configured") |
3561 | diff --git a/src/tests/download_nagios_plugin3.py b/src/tests/download_nagios_plugin3.py |
3562 | index b454caf..ded2851 100755 |
3563 | --- a/src/tests/download_nagios_plugin3.py |
3564 | +++ b/src/tests/download_nagios_plugin3.py |
3565 | @@ -3,9 +3,10 @@ from glob import glob |
3566 | import os.path |
3567 | import urllib.request |
3568 | |
3569 | -MODULE_NAME = 'nagios_plugin3.py' |
3570 | -MODULE_URL = os.path.join('https://git.launchpad.net/nrpe-charm/plain/files', |
3571 | - MODULE_NAME) |
3572 | +MODULE_NAME = "nagios_plugin3.py" |
3573 | +MODULE_URL = os.path.join( |
3574 | + "https://git.launchpad.net/nrpe-charm/plain/files", MODULE_NAME |
3575 | +) |
3576 | _cache = None |
3577 | |
3578 | |
3579 | @@ -18,11 +19,11 @@ def content(): |
3580 | |
3581 | |
3582 | def main(): |
3583 | - for i in glob('.tox/unit/lib/python3*/site-packages'): |
3584 | + for i in glob(".tox/unit/lib/python3*/site-packages"): |
3585 | mod_path = os.path.join(i, MODULE_NAME) |
3586 | if os.path.isdir(i) and not os.path.exists(mod_path): |
3587 | - open(mod_path, 'wb').write(content()) |
3588 | + open(mod_path, "wb").write(content()) |
3589 | |
3590 | |
3591 | -if __name__ == '__main__': |
3592 | +if __name__ == "__main__": |
3593 | main() |
3594 | diff --git a/src/tests/functional/conftest.py b/src/tests/functional/conftest.py |
3595 | index 9a65e83..5734800 100644 |
3596 | --- a/src/tests/functional/conftest.py |
3597 | +++ b/src/tests/functional/conftest.py |
3598 | @@ -1,5 +1,5 @@ |
3599 | #!/usr/bin/python3 |
3600 | -''' |
3601 | +""" |
3602 | Reusable pytest fixtures for functional testing |
3603 | |
3604 | Environment variables |
3605 | @@ -7,7 +7,7 @@ Environment variables |
3606 | |
3607 | test_preserve_model: |
3608 | if set, the testing model won't be torn down at the end of the testing session |
3609 | -''' |
3610 | +""" |
3611 | |
3612 | import asyncio |
3613 | import json |
3614 | @@ -18,7 +18,7 @@ import juju |
3615 | from juju.controller import Controller |
3616 | from juju.errors import JujuError |
3617 | |
3618 | -STAT_CMD = '''python3 - <<EOF |
3619 | +STAT_CMD = """python3 - <<EOF |
3620 | import json |
3621 | import os |
3622 | |
3623 | @@ -33,13 +33,13 @@ stat_json = json.dumps(stat_hash) |
3624 | print(stat_json) |
3625 | |
3626 | EOF |
3627 | -''' |
3628 | +""" |
3629 | |
3630 | |
3631 | -@pytest.yield_fixture(scope='module') |
3632 | +@pytest.yield_fixture(scope="module") |
3633 | def event_loop(): |
3634 | - '''Override the default pytest event loop to allow for fixtures using a |
3635 | - broader scope''' |
3636 | + """Override the default pytest event loop to allow for fixtures using a |
3637 | + broader scope""" |
3638 | loop = asyncio.get_event_loop_policy().new_event_loop() |
3639 | asyncio.set_event_loop(loop) |
3640 | loop.set_debug(True) |
3641 | @@ -48,23 +48,23 @@ def event_loop(): |
3642 | asyncio.set_event_loop(None) |
3643 | |
3644 | |
3645 | -@pytest.fixture(scope='module') |
3646 | +@pytest.fixture(scope="module") |
3647 | async def controller(): |
3648 | - '''Connect to the current controller''' |
3649 | + """Connect to the current controller""" |
3650 | _controller = Controller() |
3651 | await _controller.connect_current() |
3652 | yield _controller |
3653 | await _controller.disconnect() |
3654 | |
3655 | |
3656 | -@pytest.fixture(scope='module') |
3657 | +@pytest.fixture(scope="module") |
3658 | async def model(controller): # pylint: disable=redefined-outer-name |
3659 | - '''This model lives only for the duration of the test''' |
3660 | + """This model lives only for the duration of the test""" |
3661 | model_name = "functest-{}".format(uuid.uuid4()) |
3662 | _model = await controller.add_model(model_name) |
3663 | yield _model |
3664 | await _model.disconnect() |
3665 | - if not os.getenv('test_preserve_model'): |
3666 | + if not os.getenv("test_preserve_model"): |
3667 | await controller.destroy_model(model_name) |
3668 | while model_name in await controller.list_models(): |
3669 | await asyncio.sleep(1) |
3670 | @@ -72,30 +72,35 @@ async def model(controller): # pylint: disable=redefined-outer-name |
3671 | |
3672 | @pytest.fixture() |
3673 | async def get_app(model): # pylint: disable=redefined-outer-name |
3674 | - '''Returns the application requested''' |
3675 | + """Returns the application requested""" |
3676 | + |
3677 | async def _get_app(name): |
3678 | try: |
3679 | return model.applications[name] |
3680 | except KeyError: |
3681 | raise JujuError("Cannot find application {}".format(name)) |
3682 | + |
3683 | return _get_app |
3684 | |
3685 | |
3686 | @pytest.fixture() |
3687 | async def get_unit(model): # pylint: disable=redefined-outer-name |
3688 | - '''Returns the requested <app_name>/<unit_number> unit''' |
3689 | + """Returns the requested <app_name>/<unit_number> unit""" |
3690 | + |
3691 | async def _get_unit(name): |
3692 | try: |
3693 | - (app_name, unit_number) = name.split('/') |
3694 | + (app_name, unit_number) = name.split("/") |
3695 | return model.applications[app_name].units[unit_number] |
3696 | except (KeyError, ValueError): |
3697 | raise JujuError("Cannot find unit {}".format(name)) |
3698 | + |
3699 | return _get_unit |
3700 | |
3701 | |
3702 | @pytest.fixture() |
3703 | async def get_entity(get_unit, get_app): # pylint: disable=redefined-outer-name |
3704 | - '''Returns a unit or an application''' |
3705 | + """Returns a unit or an application""" |
3706 | + |
3707 | async def _get_entity(name): |
3708 | try: |
3709 | return await get_unit(name) |
3710 | @@ -104,69 +109,72 @@ async def get_entity(get_unit, get_app): # pylint: disable=redefined-outer-name |
3711 | return await get_app(name) |
3712 | except JujuError: |
3713 | raise JujuError("Cannot find entity {}".format(name)) |
3714 | + |
3715 | return _get_entity |
3716 | |
3717 | |
3718 | @pytest.fixture |
3719 | async def run_command(get_unit): # pylint: disable=redefined-outer-name |
3720 | - ''' |
3721 | + """ |
3722 | Runs a command on a unit. |
3723 | |
3724 | :param cmd: Command to be run |
3725 | :param target: Unit object or unit name string |
3726 | - ''' |
3727 | + """ |
3728 | + |
3729 | async def _run_command(cmd, target): |
3730 | - unit = ( |
3731 | - target |
3732 | - if isinstance(target, juju.unit.Unit) |
3733 | - else await get_unit(target) |
3734 | - ) |
3735 | + unit = target if isinstance(target, juju.unit.Unit) else await get_unit(target) |
3736 | action = await unit.run(cmd) |
3737 | return action.results |
3738 | + |
3739 | return _run_command |
3740 | |
3741 | |
3742 | @pytest.fixture |
3743 | async def file_stat(run_command): # pylint: disable=redefined-outer-name |
3744 | - ''' |
3745 | + """ |
3746 | Runs stat on a file |
3747 | |
3748 | :param path: File path |
3749 | :param target: Unit object or unit name string |
3750 | - ''' |
3751 | + """ |
3752 | + |
3753 | async def _file_stat(path, target): |
3754 | cmd = STAT_CMD % path |
3755 | results = await run_command(cmd, target) |
3756 | - if results['Code'] != '0': |
3757 | + if results["Code"] != "0": |
3758 | # A common possible error is simply ENOENT, the file ain't there. |
3759 | # A better solution would be to retrieve the exception that the |
3760 | # remote python code raised, but that would probably require a real |
3761 | # RPC setup |
3762 | - raise RuntimeError('Stat failed: {}'.format(results)) |
3763 | + raise RuntimeError("Stat failed: {}".format(results)) |
3764 | else: |
3765 | - return json.loads(results['Stdout']) |
3766 | + return json.loads(results["Stdout"]) |
3767 | |
3768 | return _file_stat |
3769 | |
3770 | |
3771 | @pytest.fixture |
3772 | async def file_contents(run_command): # pylint: disable=redefined-outer-name |
3773 | - ''' |
3774 | + """ |
3775 | Returns the contents of a file |
3776 | |
3777 | :param path: File path |
3778 | :param target: Unit object or unit name string |
3779 | - ''' |
3780 | + """ |
3781 | + |
3782 | async def _file_contents(path, target): |
3783 | - cmd = 'cat {}'.format(path) |
3784 | + cmd = "cat {}".format(path) |
3785 | results = await run_command(cmd, target) |
3786 | - return results['Stdout'] |
3787 | + return results["Stdout"] |
3788 | + |
3789 | return _file_contents |
3790 | |
3791 | |
3792 | @pytest.fixture |
3793 | async def reconfigure_app(get_app, model): # pylint: disable=redefined-outer-name |
3794 | - '''Applies a different config to the requested app''' |
3795 | + """Applies a different config to the requested app""" |
3796 | + |
3797 | async def _reconfigure_app(cfg, target): |
3798 | application = ( |
3799 | target |
3800 | @@ -175,5 +183,6 @@ async def reconfigure_app(get_app, model): # pylint: disable=redefined-outer-na |
3801 | ) |
3802 | await application.set_config(cfg) |
3803 | await application.get_config() |
3804 | - await model.block_until(lambda: application.status == 'active') |
3805 | + await model.block_until(lambda: application.status == "active") |
3806 | + |
3807 | return _reconfigure_app |
3808 | diff --git a/src/tests/functional/test_hwhealth.py b/src/tests/functional/test_hwhealth.py |
3809 | index cae8fd4..d17aae7 100644 |
3810 | --- a/src/tests/functional/test_hwhealth.py |
3811 | +++ b/src/tests/functional/test_hwhealth.py |
3812 | @@ -5,22 +5,22 @@ import subprocess |
3813 | import asyncio |
3814 | from os.path import abspath, dirname |
3815 | |
3816 | -sys.path.append('lib') |
3817 | +sys.path.append("lib") |
3818 | |
3819 | from hwhealth import hwdiscovery # noqa: E402 |
3820 | -from hwhealth import tools # noqa: E402 |
3821 | +from hwhealth import tools # noqa: E402 |
3822 | |
3823 | |
3824 | # Treat all tests as coroutines |
3825 | pytestmark = pytest.mark.asyncio |
3826 | SERIES = [ |
3827 | - 'focal', |
3828 | - 'bionic', |
3829 | - 'xenial', |
3830 | + "focal", |
3831 | + "bionic", |
3832 | + "xenial", |
3833 | ] |
3834 | CHARM_DIR = dirname(dirname(dirname(abspath(__file__)))) |
3835 | CHARM_BUILD_DIR = dirname(CHARM_DIR) |
3836 | -NRPECFG_DIR = '/etc/nagios/nrpe.d' |
3837 | +NRPECFG_DIR = "/etc/nagios/nrpe.d" |
3838 | DEF_TIMEOUT = 600 |
3839 | # These go along with the hpe repos for the hp* tools |
3840 | |
3841 | @@ -33,46 +33,50 @@ async def deploy_hwhealth_res(model, app_name, res_filename): |
3842 | # Attaching resources is not implemented yet in libjuju |
3843 | # see https://github.com/juju/python-libjuju/issues/294 |
3844 | tools_res_path = os.path.join(CHARM_BUILD_DIR, res_filename) |
3845 | - subprocess.check_call([ |
3846 | - 'juju', |
3847 | - 'deploy', |
3848 | - '-m', |
3849 | - model.info.name, |
3850 | - os.path.join(CHARM_BUILD_DIR, 'hw-health'), |
3851 | - app_name, |
3852 | - '--resource', |
3853 | - 'tools={}'.format(tools_res_path), |
3854 | - ]) |
3855 | + subprocess.check_call( |
3856 | + [ |
3857 | + "juju", |
3858 | + "deploy", |
3859 | + "-m", |
3860 | + model.info.name, |
3861 | + os.path.join(CHARM_BUILD_DIR, "hw-health"), |
3862 | + app_name, |
3863 | + "--resource", |
3864 | + "tools={}".format(tools_res_path), |
3865 | + ] |
3866 | + ) |
3867 | |
3868 | |
3869 | async def update_hwhealth_res(model, app_name, res_filename): |
3870 | tools_res_path = os.path.join(CHARM_BUILD_DIR, res_filename) |
3871 | - subprocess.check_call([ |
3872 | - 'juju', |
3873 | - 'attach-resource', |
3874 | - '-m', |
3875 | - model.info.name, |
3876 | - app_name, |
3877 | - 'tools={}'.format(tools_res_path), |
3878 | - ]) |
3879 | + subprocess.check_call( |
3880 | + [ |
3881 | + "juju", |
3882 | + "attach-resource", |
3883 | + "-m", |
3884 | + model.info.name, |
3885 | + app_name, |
3886 | + "tools={}".format(tools_res_path), |
3887 | + ] |
3888 | + ) |
3889 | + |
3890 | |
3891 | ################### |
3892 | # Custom fixtures # |
3893 | ################### |
3894 | |
3895 | |
3896 | -@pytest.fixture(scope='module', |
3897 | - params=SERIES) |
3898 | +@pytest.fixture(scope="module", params=SERIES) |
3899 | async def deploy_app(request, model): |
3900 | - '''Deploys the hw-health charm as a subordinate of ubuntu''' |
3901 | + """Deploys the hw-health charm as a subordinate of ubuntu""" |
3902 | # TODO: this might look nicer if we deployed a bundle instead. It could be |
3903 | # a jinja template to handle the parametrization |
3904 | release = request.param |
3905 | - channel = 'stable' |
3906 | - hw_health_app_name = 'hw-health-{}'.format(release) |
3907 | - hw_health_checksum_app_name = 'hw-health-checksum-{}'.format(release) |
3908 | + channel = "stable" |
3909 | + hw_health_app_name = "hw-health-{}".format(release) |
3910 | + hw_health_checksum_app_name = "hw-health-checksum-{}".format(release) |
3911 | |
3912 | - for principal_app in ['ubuntu', 'nagios']: |
3913 | + for principal_app in ["ubuntu", "nagios"]: |
3914 | relname = series = release |
3915 | if principal_app == "nagios" and release == "focal": |
3916 | # NOTE(aluria): cs:nagios was not available in focal |
3917 | @@ -82,97 +86,90 @@ async def deploy_app(request, model): |
3918 | series = "bionic" |
3919 | await model.deploy( |
3920 | principal_app, |
3921 | - application_name='{}-{}'.format(principal_app, relname), |
3922 | + application_name="{}-{}".format(principal_app, relname), |
3923 | series=series, |
3924 | channel=channel, |
3925 | ) |
3926 | await model.deploy( |
3927 | - 'ubuntu', |
3928 | - application_name='ubuntu-checksum-{}'.format(release), |
3929 | + "ubuntu", |
3930 | + application_name="ubuntu-checksum-{}".format(release), |
3931 | series=release, |
3932 | - channel=channel |
3933 | + channel=channel, |
3934 | ) |
3935 | nrpe_app = await model.deploy( |
3936 | - 'nrpe', |
3937 | - application_name='nrpe-{}'.format(release), |
3938 | + "nrpe", |
3939 | + application_name="nrpe-{}".format(release), |
3940 | series=release, |
3941 | num_units=0, |
3942 | channel=channel, |
3943 | ) |
3944 | - for ubuntu_unit in ['ubuntu', 'ubuntu-checksum']: |
3945 | + for ubuntu_unit in ["ubuntu", "ubuntu-checksum"]: |
3946 | await nrpe_app.add_relation( |
3947 | - 'general-info', |
3948 | - '{}-{}:juju-info'.format(ubuntu_unit, release) |
3949 | + "general-info", "{}-{}:juju-info".format(ubuntu_unit, release) |
3950 | ) |
3951 | - await nrpe_app.add_relation( |
3952 | - 'monitors', |
3953 | - 'nagios-{}:monitors'.format(relname) |
3954 | - ) |
3955 | + await nrpe_app.add_relation("monitors", "nagios-{}:monitors".format(relname)) |
3956 | |
3957 | # Attaching resources is not implemented yet in libjuju |
3958 | # see https://github.com/juju/python-libjuju/issues/294 |
3959 | - await deploy_hwhealth_res(model, hw_health_app_name, 'tools.zip') |
3960 | - await deploy_hwhealth_res(model, hw_health_checksum_app_name, |
3961 | - 'tools-checksum.zip') |
3962 | + await deploy_hwhealth_res(model, hw_health_app_name, "tools.zip") |
3963 | + await deploy_hwhealth_res(model, hw_health_checksum_app_name, "tools-checksum.zip") |
3964 | |
3965 | # This is pretty horrible, but we can't deploy via libjuju |
3966 | while True: |
3967 | try: |
3968 | hw_health_app = model.applications[hw_health_app_name] |
3969 | - hw_health_checksum_app = \ |
3970 | - model.applications[hw_health_checksum_app_name] |
3971 | + hw_health_checksum_app = model.applications[hw_health_checksum_app_name] |
3972 | break |
3973 | except KeyError: |
3974 | await asyncio.sleep(5) |
3975 | |
3976 | await hw_health_app.add_relation( |
3977 | - 'general-info', |
3978 | - 'ubuntu-{}:juju-info'.format(release) |
3979 | + "general-info", "ubuntu-{}:juju-info".format(release) |
3980 | ) |
3981 | await hw_health_app.add_relation( |
3982 | - 'nrpe-external-master', |
3983 | - '{}:nrpe-external-master'.format(nrpe_app.name) |
3984 | + "nrpe-external-master", "{}:nrpe-external-master".format(nrpe_app.name) |
3985 | ) |
3986 | |
3987 | await hw_health_checksum_app.add_relation( |
3988 | - 'general-info', |
3989 | - 'ubuntu-checksum-{}:juju-info'.format(release) |
3990 | + "general-info", "ubuntu-checksum-{}:juju-info".format(release) |
3991 | ) |
3992 | await hw_health_checksum_app.add_relation( |
3993 | - 'nrpe-external-master', |
3994 | - '{}:nrpe-external-master'.format(nrpe_app.name) |
3995 | + "nrpe-external-master", "{}:nrpe-external-master".format(nrpe_app.name) |
3996 | ) |
3997 | |
3998 | # The app will initially be in blocked state because it's running in a |
3999 | # container |
4000 | await model.block_until( |
4001 | - lambda: (hw_health_app.status == 'blocked' and # noqa:W504 |
4002 | - hw_health_checksum_app.status == 'blocked'), |
4003 | - timeout=DEF_TIMEOUT |
4004 | + lambda: ( |
4005 | + hw_health_app.status == "blocked" |
4006 | + and hw_health_checksum_app.status == "blocked" # noqa:W504 |
4007 | + ), |
4008 | + timeout=DEF_TIMEOUT, |
4009 | ) |
4010 | yield hw_health_app |
4011 | |
4012 | |
4013 | -@pytest.fixture(scope='module') |
4014 | +@pytest.fixture(scope="module") |
4015 | async def deployed_unit(deploy_app): |
4016 | - '''Returns the hw-health unit we've deployed''' |
4017 | + """Returns the hw-health unit we've deployed""" |
4018 | return deploy_app.units[0] |
4019 | |
4020 | |
4021 | -@pytest.fixture(scope='function') |
4022 | +@pytest.fixture(scope="function") |
4023 | async def toolset(monkeypatch): |
4024 | # All tool classes know which files should be installed and how, so we can |
4025 | # use them to read the expected stat results. Monkeypatching is however |
4026 | # required as the classes code is not expected to be run outside of a |
4027 | # deployed charm |
4028 | with monkeypatch.context() as m: |
4029 | - m.setattr('charmhelpers.core.hookenv.charm_dir', |
4030 | - lambda: CHARM_BUILD_DIR) |
4031 | - m.setattr('charmhelpers.core.hookenv.config', |
4032 | - lambda x=None: dict()) |
4033 | - m.setattr('charmhelpers.contrib.charmsupport.nrpe.get_nagios_hostname', |
4034 | - lambda: 'pytest') |
4035 | - return [tool() for tool in hwdiscovery.get_tools('test')] |
4036 | + m.setattr("charmhelpers.core.hookenv.charm_dir", lambda: CHARM_BUILD_DIR) |
4037 | + m.setattr("charmhelpers.core.hookenv.config", lambda x=None: dict()) |
4038 | + m.setattr( |
4039 | + "charmhelpers.contrib.charmsupport.nrpe.get_nagios_hostname", |
4040 | + lambda: "pytest", |
4041 | + ) |
4042 | + return [tool() for tool in hwdiscovery.get_tools("test")] |
4043 | + |
4044 | |
4045 | ######### |
4046 | # Tests # |
4047 | @@ -180,69 +177,69 @@ async def toolset(monkeypatch): |
4048 | |
4049 | |
4050 | async def test_cannot_run_in_container(deploy_app): |
4051 | - assert deploy_app.status == 'blocked' |
4052 | + assert deploy_app.status == "blocked" |
4053 | |
4054 | |
4055 | async def test_forced_deploy(deploy_app, model, run_command): |
4056 | # Create a fake NVMe device for the cronjob to be configured |
4057 | CREATE_FAKE_NVME = "/bin/bash -c 'touch /dev/nvme0'" |
4058 | - series = deploy_app.name.split('-')[-1] |
4059 | + series = deploy_app.name.split("-")[-1] |
4060 | for unit in model.units.values(): |
4061 | - if unit.entity_id.startswith('ubuntu-{}'.format(series)): |
4062 | + if unit.entity_id.startswith("ubuntu-{}".format(series)): |
4063 | ubuntu_unit = unit |
4064 | await model.block_until( |
4065 | - lambda: ubuntu_unit.workload_status == 'active', |
4066 | - timeout=DEF_TIMEOUT |
4067 | + lambda: ubuntu_unit.workload_status == "active", timeout=DEF_TIMEOUT |
4068 | ) |
4069 | await run_command(CREATE_FAKE_NVME, ubuntu_unit) |
4070 | break |
4071 | |
4072 | - await deploy_app.set_config({'manufacturer': 'test'}) |
4073 | - await model.block_until( |
4074 | - lambda: deploy_app.status == 'active', |
4075 | - timeout=DEF_TIMEOUT |
4076 | - ) |
4077 | - assert deploy_app.status == 'active' |
4078 | + await deploy_app.set_config({"manufacturer": "test"}) |
4079 | + await model.block_until(lambda: deploy_app.status == "active", timeout=DEF_TIMEOUT) |
4080 | + assert deploy_app.status == "active" |
4081 | |
4082 | |
4083 | async def test_checksum_forced_deploy(deploy_app, model, run_command): |
4084 | # Create a fake NVMe device for the cronjob to be configured |
4085 | CREATE_FAKE_NVME = "/bin/bash -c 'touch /dev/nvme0'" |
4086 | - series = deploy_app.name.split('-')[-1] |
4087 | - checksum_app_name = 'hw-health-checksum-{}'.format(series) |
4088 | + series = deploy_app.name.split("-")[-1] |
4089 | + checksum_app_name = "hw-health-checksum-{}".format(series) |
4090 | checksum_app = model.applications[checksum_app_name] |
4091 | for unit in model.units.values(): |
4092 | - if unit.entity_id.startswith('ubuntu-checksum-{}'.format(series)): |
4093 | + if unit.entity_id.startswith("ubuntu-checksum-{}".format(series)): |
4094 | ubuntu_unit = unit |
4095 | await model.block_until( |
4096 | - lambda: ubuntu_unit.workload_status == 'active', |
4097 | - timeout=DEF_TIMEOUT |
4098 | + lambda: ubuntu_unit.workload_status == "active", timeout=DEF_TIMEOUT |
4099 | ) |
4100 | await run_command(CREATE_FAKE_NVME, ubuntu_unit) |
4101 | elif unit.entity_id.startswith(checksum_app_name): |
4102 | checksum_unit = unit |
4103 | |
4104 | - await checksum_app.set_config({'manufacturer': 'test'}) |
4105 | + await checksum_app.set_config({"manufacturer": "test"}) |
4106 | try: |
4107 | await model.block_until( |
4108 | lambda: ( |
4109 | - checksum_app.status == 'blocked' and checksum_unit.workload_status_message == 'Tool megacli - checksum error' # noqa E501 |
4110 | + checksum_app.status == "blocked" |
4111 | + and checksum_unit.workload_status_message |
4112 | + == "Tool megacli - checksum error" # noqa E501 |
4113 | ), |
4114 | - timeout=DEF_TIMEOUT) |
4115 | + timeout=DEF_TIMEOUT, |
4116 | + ) |
4117 | except asyncio.exceptions.TimeoutError: |
4118 | print( |
4119 | "failed to get expected state 'blocked:Tool megacli - checksum error', " |
4120 | - "witnessed '{}:{}'".format(checksum_app.status, checksum_unit.workload_status_message) |
4121 | + "witnessed '{}:{}'".format( |
4122 | + checksum_app.status, checksum_unit.workload_status_message |
4123 | + ) |
4124 | ) |
4125 | - assert checksum_app.status == 'blocked' |
4126 | - assert checksum_unit.workload_status_message == 'Tool megacli - checksum error' |
4127 | + assert checksum_app.status == "blocked" |
4128 | + assert checksum_unit.workload_status_message == "Tool megacli - checksum error" |
4129 | |
4130 | |
4131 | async def test_checksum_updated_resource_missing(deploy_app, model): |
4132 | - series = deploy_app.name.split('-')[-1] |
4133 | - checksum_app_name = 'hw-health-checksum-{}'.format(series) |
4134 | + series = deploy_app.name.split("-")[-1] |
4135 | + checksum_app_name = "hw-health-checksum-{}".format(series) |
4136 | checksum_app = model.applications[checksum_app_name] |
4137 | - await update_hwhealth_res(model, checksum_app_name, 'tools-missing.zip') |
4138 | + await update_hwhealth_res(model, checksum_app_name, "tools-missing.zip") |
4139 | for unit in model.units.values(): |
4140 | if unit.entity_id.startswith(checksum_app_name): |
4141 | checksum_unit = unit |
4142 | @@ -250,209 +247,242 @@ async def test_checksum_updated_resource_missing(deploy_app, model): |
4143 | |
4144 | await model.block_until( |
4145 | lambda: ( |
4146 | - checksum_app.status == 'blocked' and checksum_unit.workload_status_message == 'Tool megacli not found' # noqa E501 |
4147 | + checksum_app.status == "blocked" |
4148 | + and checksum_unit.workload_status_message |
4149 | + == "Tool megacli not found" # noqa E501 |
4150 | ), |
4151 | - timeout=DEF_TIMEOUT |
4152 | + timeout=DEF_TIMEOUT, |
4153 | ) |
4154 | |
4155 | |
4156 | async def test_checksum_updated_resource_ok(deploy_app, model): |
4157 | - series = deploy_app.name.split('-')[-1] |
4158 | - checksum_app_name = 'hw-health-checksum-{}'.format(series) |
4159 | + series = deploy_app.name.split("-")[-1] |
4160 | + checksum_app_name = "hw-health-checksum-{}".format(series) |
4161 | checksum_app = model.applications[checksum_app_name] |
4162 | - await update_hwhealth_res(model, checksum_app_name, 'tools.zip') |
4163 | + await update_hwhealth_res(model, checksum_app_name, "tools.zip") |
4164 | for unit in model.units.values(): |
4165 | if unit.entity_id.startswith(checksum_app_name): |
4166 | checksum_unit = unit |
4167 | break |
4168 | |
4169 | await model.block_until( |
4170 | - lambda: (checksum_app.status == 'active' and # noqa:W504 |
4171 | - checksum_unit.workload_status_message == 'ready'), |
4172 | - timeout=DEF_TIMEOUT |
4173 | + lambda: ( |
4174 | + checksum_app.status == "active" |
4175 | + and checksum_unit.workload_status_message == "ready" # noqa:W504 |
4176 | + ), |
4177 | + timeout=DEF_TIMEOUT, |
4178 | ) |
4179 | |
4180 | |
4181 | -async def test_deployed_file_stats(monkeypatch, toolset, deploy_app, deployed_unit, file_stat): |
4182 | +async def test_deployed_file_stats( |
4183 | + monkeypatch, toolset, deploy_app, deployed_unit, file_stat |
4184 | +): |
4185 | # This should really be a parametrized test, but fixtures cannot be used as |
4186 | # params value as if they were iterators |
4187 | # It should also check for other installed files and differentiate between |
4188 | # tool types (e.g. tools.Ipmi does not use a vendor binary) |
4189 | - series = deploy_app.name.split('-')[-1] |
4190 | + series = deploy_app.name.split("-")[-1] |
4191 | for tool in toolset: |
4192 | # Skip tools that are out of series for the currently deployed application |
4193 | with monkeypatch.context() as m: |
4194 | - m.setattr('hwhealth.tools.lsb_release', |
4195 | - lambda x=None: {'DISTRIB_CODENAME': series}) |
4196 | - m.setattr('charmhelpers.core.hookenv.config', |
4197 | - lambda x=None: {'manufacturer': 'test'}) |
4198 | + m.setattr( |
4199 | + "hwhealth.tools.lsb_release", |
4200 | + lambda x=None: {"DISTRIB_CODENAME": series}, |
4201 | + ) |
4202 | + m.setattr( |
4203 | + "charmhelpers.core.hookenv.config", |
4204 | + lambda x=None: {"manufacturer": "test"}, |
4205 | + ) |
4206 | if not tool.is_series_supported(): |
4207 | - print('Skipping tool {}. Distribution {} not supported.'.format(tool, series)) |
4208 | + print( |
4209 | + "Skipping tool {}. Distribution {} not supported.".format( |
4210 | + tool, series |
4211 | + ) |
4212 | + ) |
4213 | continue |
4214 | # Have we rendered the nrpe check cfg? |
4215 | - nrpecfg_path = os.path.join(NRPECFG_DIR, |
4216 | - 'check_{}.cfg'.format(tool._shortname)) |
4217 | - print('Checking {}'.format(nrpecfg_path)) |
4218 | + nrpecfg_path = os.path.join(NRPECFG_DIR, "check_{}.cfg".format(tool._shortname)) |
4219 | + print("Checking {}".format(nrpecfg_path)) |
4220 | test_stat = await file_stat(nrpecfg_path, deployed_unit) |
4221 | - assert test_stat['size'] > 0 |
4222 | + assert test_stat["size"] > 0 |
4223 | |
4224 | # Have we installed the nrpe check script? |
4225 | - nrpescript_path = os.path.join(tool.NRPE_PLUGINS_DIR, |
4226 | - os.path.basename(tool._nrpe_script)) |
4227 | - print('Checking {}'.format(nrpescript_path)) |
4228 | + nrpescript_path = os.path.join( |
4229 | + tool.NRPE_PLUGINS_DIR, os.path.basename(tool._nrpe_script) |
4230 | + ) |
4231 | + print("Checking {}".format(nrpescript_path)) |
4232 | test_stat = await file_stat(nrpescript_path, deployed_unit) |
4233 | - assert test_stat['size'] > 0 |
4234 | - assert test_stat['gid'] == tool.NRPE_PLUGINS_GID |
4235 | - assert test_stat['uid'] == tool.NRPE_PLUGINS_UID |
4236 | - assert test_stat['mode'] == oct(tool.NRPE_PLUGINS_MODE) |
4237 | + assert test_stat["size"] > 0 |
4238 | + assert test_stat["gid"] == tool.NRPE_PLUGINS_GID |
4239 | + assert test_stat["uid"] == tool.NRPE_PLUGINS_UID |
4240 | + assert test_stat["mode"] == oct(tool.NRPE_PLUGINS_MODE) |
4241 | |
4242 | # Have we installed any common libs? |
4243 | for lib in tool._common_libs: |
4244 | - lib_path = os.path.join(tool.NRPE_PLUGINS_DIR, |
4245 | - os.path.basename(lib)) |
4246 | - print('Checking {}'.format(nrpescript_path)) |
4247 | + lib_path = os.path.join(tool.NRPE_PLUGINS_DIR, os.path.basename(lib)) |
4248 | + print("Checking {}".format(nrpescript_path)) |
4249 | test_stat = await file_stat(lib_path, deployed_unit) |
4250 | - assert test_stat['size'] > 0 |
4251 | - assert test_stat['gid'] == tool.NRPE_PLUGINS_GID |
4252 | - assert test_stat['uid'] == tool.NRPE_PLUGINS_UID |
4253 | - assert test_stat['mode'] == oct(tool.NRPE_PLUGINS_MODE) |
4254 | + assert test_stat["size"] > 0 |
4255 | + assert test_stat["gid"] == tool.NRPE_PLUGINS_GID |
4256 | + assert test_stat["uid"] == tool.NRPE_PLUGINS_UID |
4257 | + assert test_stat["mode"] == oct(tool.NRPE_PLUGINS_MODE) |
4258 | |
4259 | if isinstance(tool, tools.Ipmi) or isinstance(tool, tools.Nvme): |
4260 | # Have we added sudo rights for running freeipmi commands? |
4261 | sudoer_path = os.path.join(tool.SUDOERS_DIR, tool._sudoer_file) |
4262 | - print('Checking {}'.format(sudoer_path)) |
4263 | + print("Checking {}".format(sudoer_path)) |
4264 | test_stat = await file_stat(sudoer_path, deployed_unit) |
4265 | - assert test_stat['size'] > 0 |
4266 | - assert test_stat['gid'] == tool.SUDOERS_GID |
4267 | - assert test_stat['uid'] == tool.SUDOERS_UID |
4268 | - assert test_stat['mode'] == oct(tool.SUDOERS_MODE) |
4269 | + assert test_stat["size"] > 0 |
4270 | + assert test_stat["gid"] == tool.SUDOERS_GID |
4271 | + assert test_stat["uid"] == tool.SUDOERS_UID |
4272 | + assert test_stat["mode"] == oct(tool.SUDOERS_MODE) |
4273 | |
4274 | if isinstance(tool, tools.Ipmi) or isinstance(tool, tools.VendorTool): |
4275 | # Have we installed the cronjob script helper? |
4276 | - cron_script_path = os.path.join(tool.NRPE_PLUGINS_DIR, |
4277 | - tool._cron_script) |
4278 | - print('Checking {}'.format(cron_script_path)) |
4279 | + cron_script_path = os.path.join(tool.NRPE_PLUGINS_DIR, tool._cron_script) |
4280 | + print("Checking {}".format(cron_script_path)) |
4281 | test_stat = await file_stat(cron_script_path, deployed_unit) |
4282 | - assert test_stat['size'] > 0 |
4283 | - assert test_stat['gid'] == tool.CRONJOB_SCRIPT_GID |
4284 | - assert test_stat['uid'] == tool.CRONJOB_SCRIPT_UID |
4285 | - assert test_stat['mode'] == oct(tool.CRONJOB_SCRIPT_MODE) |
4286 | + assert test_stat["size"] > 0 |
4287 | + assert test_stat["gid"] == tool.CRONJOB_SCRIPT_GID |
4288 | + assert test_stat["uid"] == tool.CRONJOB_SCRIPT_UID |
4289 | + assert test_stat["mode"] == oct(tool.CRONJOB_SCRIPT_MODE) |
4290 | |
4291 | # Have we installed the cronjob itself? |
4292 | - cronjob_path = os.path.join(tool.CROND_DIR, |
4293 | - 'hwhealth_{}'.format(tool._shortname)) |
4294 | - print('Checking {}'.format(cronjob_path)) |
4295 | + cronjob_path = os.path.join( |
4296 | + tool.CROND_DIR, "hwhealth_{}".format(tool._shortname) |
4297 | + ) |
4298 | + print("Checking {}".format(cronjob_path)) |
4299 | test_stat = await file_stat(cronjob_path, deployed_unit) |
4300 | - assert test_stat['size'] > 0 |
4301 | + assert test_stat["size"] > 0 |
4302 | |
4303 | if isinstance(tool, tools.VendorTool): |
4304 | # Have we installed the vendor binary? |
4305 | if isinstance(tool, tools.Mdadm): |
4306 | - bin_path = os.path.join('/sbin', tool._shortname) |
4307 | + bin_path = os.path.join("/sbin", tool._shortname) |
4308 | else: |
4309 | bin_path = os.path.join(tool.TOOLS_DIR, tool._shortname) |
4310 | - print('Checking {}'.format(bin_path)) |
4311 | + print("Checking {}".format(bin_path)) |
4312 | test_stat = await file_stat(bin_path, deployed_unit) |
4313 | - assert test_stat['size'] > 0 |
4314 | - assert test_stat['gid'] == tool.TOOLS_GID |
4315 | - assert test_stat['uid'] == tool.TOOLS_UID |
4316 | - assert test_stat['mode'] == oct(tool.TOOLS_MODE) |
4317 | - |
4318 | - |
4319 | -@pytest.mark.parametrize('script_type', ['_nrpe_script', '_cron_script']) |
4320 | -async def test_imports(script_type, monkeypatch, toolset, deploy_app, deployed_unit, run_command): |
4321 | - '''Dry run all auxiliary files to ensure we have all needed dependecies''' |
4322 | - series = deploy_app.name.split('-')[-1] |
4323 | + assert test_stat["size"] > 0 |
4324 | + assert test_stat["gid"] == tool.TOOLS_GID |
4325 | + assert test_stat["uid"] == tool.TOOLS_UID |
4326 | + assert test_stat["mode"] == oct(tool.TOOLS_MODE) |
4327 | + |
4328 | + |
4329 | +@pytest.mark.parametrize("script_type", ["_nrpe_script", "_cron_script"]) |
4330 | +async def test_imports( |
4331 | + script_type, monkeypatch, toolset, deploy_app, deployed_unit, run_command |
4332 | +): |
4333 | + """Dry run all auxiliary files to ensure we have all needed dependecies""" |
4334 | + series = deploy_app.name.split("-")[-1] |
4335 | for tool in toolset: |
4336 | # Skip tools that are out of series for the currently deployed application |
4337 | with monkeypatch.context() as m: |
4338 | - m.setattr('hwhealth.tools.lsb_release', |
4339 | - lambda x=None: {'DISTRIB_CODENAME': series}) |
4340 | - m.setattr('charmhelpers.core.hookenv.config', |
4341 | - lambda x=None: {'manufacturer': 'test'}) |
4342 | + m.setattr( |
4343 | + "hwhealth.tools.lsb_release", |
4344 | + lambda x=None: {"DISTRIB_CODENAME": series}, |
4345 | + ) |
4346 | + m.setattr( |
4347 | + "charmhelpers.core.hookenv.config", |
4348 | + lambda x=None: {"manufacturer": "test"}, |
4349 | + ) |
4350 | if not tool.is_series_supported(): |
4351 | - print('Skipping tool {}. Distribution {} not supported.'.format(tool, series)) |
4352 | + print( |
4353 | + "Skipping tool {}. Distribution {} not supported.".format( |
4354 | + tool, series |
4355 | + ) |
4356 | + ) |
4357 | continue |
4358 | script_name = getattr(tool, script_type) |
4359 | tool_name = tool.__class__.__name__ |
4360 | if not script_name: |
4361 | # Cannot pytest.skip because it would break out of the loop |
4362 | - print('Skipping test as {} does not have a {}' |
4363 | - ''.format(tool_name, script_type)) |
4364 | + print( |
4365 | + "Skipping test as {} does not have a {}" |
4366 | + "".format(tool_name, script_type) |
4367 | + ) |
4368 | else: |
4369 | - print('Checking {}: {}'.format(tool_name, script_name)) |
4370 | + print("Checking {}: {}".format(tool_name, script_name)) |
4371 | path = os.path.join(tool.NRPE_PLUGINS_DIR, script_name) |
4372 | cmd = path + " --help" |
4373 | results = await run_command(cmd, deployed_unit) |
4374 | - rc = results['Code'] |
4375 | - assert rc == '0', ('{}, {}. RC is non-zero. results={}' |
4376 | - ''.format(tool_name, script_name, results)) |
4377 | + rc = results["Code"] |
4378 | + assert rc == "0", "{}, {}. RC is non-zero. results={}".format( |
4379 | + tool_name, script_name, results |
4380 | + ) |
4381 | |
4382 | |
4383 | async def test_removal(monkeypatch, toolset, model, deploy_app, file_stat): |
4384 | - '''Remove the unit, test that all files have been cleaned up''' |
4385 | + """Remove the unit, test that all files have been cleaned up""" |
4386 | hw_health_app_name = deploy_app.name |
4387 | - series = deploy_app.name.split('-')[-1] |
4388 | + series = deploy_app.name.split("-")[-1] |
4389 | await deploy_app.remove() |
4390 | await model.block_until( |
4391 | - lambda: hw_health_app_name not in model.applications, |
4392 | - timeout=DEF_TIMEOUT |
4393 | + lambda: hw_health_app_name not in model.applications, timeout=DEF_TIMEOUT |
4394 | ) |
4395 | # Since we've removed the hw-health app, we can't target it anymore, we |
4396 | # need to find the principal unit |
4397 | for unit in model.units.values(): |
4398 | - if unit.entity_id.startswith('ubuntu-{}'.format(series)): |
4399 | + if unit.entity_id.startswith("ubuntu-{}".format(series)): |
4400 | ubuntu_unit = unit |
4401 | for tool in toolset: |
4402 | # Skip tools that are out of series for the currently deployed application |
4403 | with monkeypatch.context() as m: |
4404 | - m.setattr('hwhealth.tools.lsb_release', |
4405 | - lambda x=None: {'DISTRIB_CODENAME': series}) |
4406 | - m.setattr('charmhelpers.core.hookenv.config', |
4407 | - lambda x=None: {'manufacturer': 'test'}) |
4408 | + m.setattr( |
4409 | + "hwhealth.tools.lsb_release", |
4410 | + lambda x=None: {"DISTRIB_CODENAME": series}, |
4411 | + ) |
4412 | + m.setattr( |
4413 | + "charmhelpers.core.hookenv.config", |
4414 | + lambda x=None: {"manufacturer": "test"}, |
4415 | + ) |
4416 | if not tool.is_series_supported(): |
4417 | - print('Skipping tool {}. Distribution {} not supported.'.format(tool, series)) |
4418 | + print( |
4419 | + "Skipping tool {}. Distribution {} not supported.".format( |
4420 | + tool, series |
4421 | + ) |
4422 | + ) |
4423 | continue |
4424 | # Have we removed the nrpe check cfg? |
4425 | - nrpecfg_path = os.path.join(NRPECFG_DIR, |
4426 | - 'check_{}.cfg'.format(tool._shortname)) |
4427 | - print('Checking {}'.format(nrpecfg_path)) |
4428 | + nrpecfg_path = os.path.join(NRPECFG_DIR, "check_{}.cfg".format(tool._shortname)) |
4429 | + print("Checking {}".format(nrpecfg_path)) |
4430 | with pytest.raises(RuntimeError): |
4431 | await file_stat(nrpecfg_path, ubuntu_unit) |
4432 | |
4433 | # Have we removed the nrpe check script? |
4434 | - nrpescript_path = os.path.join(tool.NRPE_PLUGINS_DIR, |
4435 | - tool._nrpe_script) |
4436 | - print('Checking {}'.format(nrpescript_path)) |
4437 | + nrpescript_path = os.path.join(tool.NRPE_PLUGINS_DIR, tool._nrpe_script) |
4438 | + print("Checking {}".format(nrpescript_path)) |
4439 | with pytest.raises(RuntimeError): |
4440 | await file_stat(nrpescript_path, ubuntu_unit) |
4441 | |
4442 | if isinstance(tool, tools.Ipmi) or isinstance(tool, tools.Nvme): |
4443 | # Have we removed sudo rights for running freeipmi commands? |
4444 | sudoer_path = os.path.join(tool.SUDOERS_DIR, tool._sudoer_file) |
4445 | - print('Checking {}'.format(sudoer_path)) |
4446 | + print("Checking {}".format(sudoer_path)) |
4447 | with pytest.raises(RuntimeError): |
4448 | await file_stat(sudoer_path, ubuntu_unit) |
4449 | |
4450 | if isinstance(tool, tools.Ipmi) or isinstance(tool, tools.VendorTool): |
4451 | # Have we removed the cronjob script helper? |
4452 | - cronjob_path = os.path.join(tool.NRPE_PLUGINS_DIR, |
4453 | - tool._cron_script) |
4454 | - print('Checking {}'.format(cronjob_path)) |
4455 | + cronjob_path = os.path.join(tool.NRPE_PLUGINS_DIR, tool._cron_script) |
4456 | + print("Checking {}".format(cronjob_path)) |
4457 | with pytest.raises(RuntimeError): |
4458 | await file_stat(cronjob_path, ubuntu_unit) |
4459 | |
4460 | # Have we removed the cronjob itself? |
4461 | - cronjob_path = os.path.join(tool.CROND_DIR, |
4462 | - 'hwhealth_{}'.format(tool._shortname)) |
4463 | - print('Checking {}'.format(cronjob_path)) |
4464 | + cronjob_path = os.path.join( |
4465 | + tool.CROND_DIR, "hwhealth_{}".format(tool._shortname) |
4466 | + ) |
4467 | + print("Checking {}".format(cronjob_path)) |
4468 | with pytest.raises(RuntimeError): |
4469 | await file_stat(cronjob_path, ubuntu_unit) |
4470 | |
4471 | - if isinstance(tool, tools.VendorTool) and not isinstance(tool, tools.Mdadm): # noqa E501 |
4472 | + if isinstance(tool, tools.VendorTool) and not isinstance( |
4473 | + tool, tools.Mdadm |
4474 | + ): # noqa E501 |
4475 | # /sbin/mdadm will not be removed, but the vendor binaries |
4476 | # should have been |
4477 | bin_path = os.path.join(tool.TOOLS_DIR, tool._shortname) |
4478 | - print('Checking {}'.format(bin_path)) |
4479 | + print("Checking {}".format(bin_path)) |
4480 | with pytest.raises(RuntimeError): |
4481 | await file_stat(bin_path, ubuntu_unit) |
4482 | diff --git a/src/tests/unit/lib/samples.py b/src/tests/unit/lib/samples.py |
4483 | index a0790ee..db34828 100644 |
4484 | --- a/src/tests/unit/lib/samples.py |
4485 | +++ b/src/tests/unit/lib/samples.py |
4486 | @@ -1,12 +1,7 @@ |
4487 | import os |
4488 | import glob |
4489 | |
4490 | -SAMPLES_DIR = os.path.join( |
4491 | - os.path.dirname(__file__), |
4492 | - '..', |
4493 | - '..', |
4494 | - 'hw-health-samples' |
4495 | -) |
4496 | +SAMPLES_DIR = os.path.join(os.path.dirname(__file__), "..", "..", "hw-health-samples") |
4497 | |
4498 | |
4499 | def get_sample(name): |
4500 | diff --git a/src/tests/unit/test_actions.py b/src/tests/unit/test_actions.py |
4501 | index dfa7323..3a3f4f2 100644 |
4502 | --- a/src/tests/unit/test_actions.py |
4503 | +++ b/src/tests/unit/test_actions.py |
4504 | @@ -17,110 +17,149 @@ import sys |
4505 | import unittest |
4506 | import unittest.mock as mock |
4507 | |
4508 | -sys.path.append('.') |
4509 | +sys.path.append(".") |
4510 | from actions.actions import clear_sel, show_sel # noqa:E402 |
4511 | |
4512 | |
4513 | class ClearSelTestCase(unittest.TestCase): |
4514 | - |
4515 | - @mock.patch('actions.actions.log') |
4516 | - @mock.patch('subprocess.check_output') |
4517 | - @mock.patch('subprocess.check_call') |
4518 | + @mock.patch("actions.actions.log") |
4519 | + @mock.patch("subprocess.check_output") |
4520 | + @mock.patch("subprocess.check_call") |
4521 | def test_clear_sel(self, mock_check_call, mock_subprocess, mock_log): |
4522 | sel_output = "Unittest system event log output".encode() |
4523 | mock_subprocess.return_value = sel_output |
4524 | mock_check_call.return_value = None |
4525 | clear_sel() |
4526 | - mock_check_call.assert_called_once_with(['action-set', "message={}".format(sel_output.decode())]) |
4527 | + mock_check_call.assert_called_once_with( |
4528 | + ["action-set", "message={}".format(sel_output.decode())] |
4529 | + ) |
4530 | |
4531 | |
4532 | class ShowSelTestCase(unittest.TestCase): |
4533 | - |
4534 | - @mock.patch('actions.actions.log') |
4535 | - @mock.patch('actions.actions.action_set') |
4536 | - @mock.patch('actions.actions.action_get') |
4537 | - @mock.patch('subprocess.check_output') |
4538 | + @mock.patch("actions.actions.log") |
4539 | + @mock.patch("actions.actions.action_set") |
4540 | + @mock.patch("actions.actions.action_get") |
4541 | + @mock.patch("subprocess.check_output") |
4542 | def test_empty_output_from_ipmi_sel( |
4543 | - self, mock_check_output, mock_action_get, mock_action_set, mock_log): |
4544 | + self, mock_check_output, mock_action_get, mock_action_set, mock_log |
4545 | + ): |
4546 | show_all_flag = False |
4547 | output_body = "" |
4548 | expected_output = "No matching entries found" |
4549 | - self._test_valid_show_sel_call(show_all_flag, output_body, expected_output, |
4550 | - mock_check_output, mock_action_get, mock_action_set) |
4551 | + self._test_valid_show_sel_call( |
4552 | + show_all_flag, |
4553 | + output_body, |
4554 | + expected_output, |
4555 | + mock_check_output, |
4556 | + mock_action_get, |
4557 | + mock_action_set, |
4558 | + ) |
4559 | |
4560 | - @mock.patch('actions.actions.log') |
4561 | - @mock.patch('actions.actions.action_set') |
4562 | - @mock.patch('actions.actions.action_get') |
4563 | - @mock.patch('subprocess.check_output') |
4564 | + @mock.patch("actions.actions.log") |
4565 | + @mock.patch("actions.actions.action_set") |
4566 | + @mock.patch("actions.actions.action_get") |
4567 | + @mock.patch("subprocess.check_output") |
4568 | def test_only_nominal_entries_with_show_all_false( |
4569 | - self, mock_check_output, mock_action_get, mock_action_set, mock_log): |
4570 | + self, mock_check_output, mock_action_get, mock_action_set, mock_log |
4571 | + ): |
4572 | show_all_flag = False |
4573 | - output_body = "\n".join([ |
4574 | - "Header line", |
4575 | - "Nominal body line #1", |
4576 | - "Nominal body line #2", |
4577 | - "Nominal body line #3", |
4578 | - ]) |
4579 | + output_body = "\n".join( |
4580 | + [ |
4581 | + "Header line", |
4582 | + "Nominal body line #1", |
4583 | + "Nominal body line #2", |
4584 | + "Nominal body line #3", |
4585 | + ] |
4586 | + ) |
4587 | expected_output = "No matching entries found" |
4588 | - self._test_valid_show_sel_call(show_all_flag, output_body, expected_output, |
4589 | - mock_check_output, mock_action_get, mock_action_set) |
4590 | + self._test_valid_show_sel_call( |
4591 | + show_all_flag, |
4592 | + output_body, |
4593 | + expected_output, |
4594 | + mock_check_output, |
4595 | + mock_action_get, |
4596 | + mock_action_set, |
4597 | + ) |
4598 | |
4599 | - @mock.patch('actions.actions.log') |
4600 | - @mock.patch('actions.actions.action_set') |
4601 | - @mock.patch('actions.actions.action_get') |
4602 | - @mock.patch('subprocess.check_output') |
4603 | + @mock.patch("actions.actions.log") |
4604 | + @mock.patch("actions.actions.action_set") |
4605 | + @mock.patch("actions.actions.action_get") |
4606 | + @mock.patch("subprocess.check_output") |
4607 | def test_only_nominal_entries_with_show_all_true( |
4608 | - self, mock_check_output, mock_action_get, mock_action_set, mock_log): |
4609 | + self, mock_check_output, mock_action_get, mock_action_set, mock_log |
4610 | + ): |
4611 | show_all_flag = True |
4612 | - output_body = "\n".join([ |
4613 | - "Header line", |
4614 | - "Nominal body line #1", |
4615 | - "Nominal body line #2", |
4616 | - "Nominal body line #3", |
4617 | - ]) |
4618 | + output_body = "\n".join( |
4619 | + [ |
4620 | + "Header line", |
4621 | + "Nominal body line #1", |
4622 | + "Nominal body line #2", |
4623 | + "Nominal body line #3", |
4624 | + ] |
4625 | + ) |
4626 | expected_output = output_body |
4627 | - self._test_valid_show_sel_call(show_all_flag, output_body, expected_output, |
4628 | - mock_check_output, mock_action_get, mock_action_set) |
4629 | + self._test_valid_show_sel_call( |
4630 | + show_all_flag, |
4631 | + output_body, |
4632 | + expected_output, |
4633 | + mock_check_output, |
4634 | + mock_action_get, |
4635 | + mock_action_set, |
4636 | + ) |
4637 | |
4638 | - @mock.patch('actions.actions.log') |
4639 | - @mock.patch('actions.actions.action_set') |
4640 | - @mock.patch('actions.actions.action_get') |
4641 | - @mock.patch('subprocess.check_output') |
4642 | + @mock.patch("actions.actions.log") |
4643 | + @mock.patch("actions.actions.action_set") |
4644 | + @mock.patch("actions.actions.action_get") |
4645 | + @mock.patch("subprocess.check_output") |
4646 | def test_non_nominal_entries_present_with_show_all_false( |
4647 | - self, mock_check_output, mock_action_get, mock_action_set, mock_log): |
4648 | + self, mock_check_output, mock_action_get, mock_action_set, mock_log |
4649 | + ): |
4650 | show_all_flag = False |
4651 | - output_body = "\n".join([ |
4652 | - "Header line", |
4653 | - "Nominal body line #1", |
4654 | - "Warning line #1", |
4655 | - "Critical line #1", |
4656 | - "Nominal body line #2", |
4657 | - "Nominal body line #3", |
4658 | - "Warning line #2", |
4659 | - ]) |
4660 | - expected_output = "\n".join([ |
4661 | - "Header line", |
4662 | - "Warning line #1", |
4663 | - "Critical line #1", |
4664 | - "Warning line #2", |
4665 | - ]) |
4666 | - self._test_valid_show_sel_call(show_all_flag, output_body, expected_output, |
4667 | - mock_check_output, mock_action_get, mock_action_set) |
4668 | + output_body = "\n".join( |
4669 | + [ |
4670 | + "Header line", |
4671 | + "Nominal body line #1", |
4672 | + "Warning line #1", |
4673 | + "Critical line #1", |
4674 | + "Nominal body line #2", |
4675 | + "Nominal body line #3", |
4676 | + "Warning line #2", |
4677 | + ] |
4678 | + ) |
4679 | + expected_output = "\n".join( |
4680 | + ["Header line", "Warning line #1", "Critical line #1", "Warning line #2"] |
4681 | + ) |
4682 | + self._test_valid_show_sel_call( |
4683 | + show_all_flag, |
4684 | + output_body, |
4685 | + expected_output, |
4686 | + mock_check_output, |
4687 | + mock_action_get, |
4688 | + mock_action_set, |
4689 | + ) |
4690 | |
4691 | - def _test_valid_show_sel_call(self, show_all_flag, output_body, expected_output, |
4692 | - mock_check_output, mock_action_get, mock_action_set): |
4693 | + def _test_valid_show_sel_call( |
4694 | + self, |
4695 | + show_all_flag, |
4696 | + output_body, |
4697 | + expected_output, |
4698 | + mock_check_output, |
4699 | + mock_action_get, |
4700 | + mock_action_set, |
4701 | + ): |
4702 | mock_action_get.return_value = show_all_flag |
4703 | mock_check_output.return_value = output_body.encode() |
4704 | show_sel() |
4705 | - self.assertEqual(mock_action_set.call_args[0][0]['message'], |
4706 | - expected_output) |
4707 | + self.assertEqual(mock_action_set.call_args[0][0]["message"], expected_output) |
4708 | |
4709 | - @mock.patch('actions.actions.action_fail') |
4710 | - @mock.patch('actions.actions.action_get') |
4711 | - @mock.patch('subprocess.check_output') |
4712 | - def test_subprocess_error(self, mock_check_output, mock_action_get, mock_action_fail): |
4713 | + @mock.patch("actions.actions.action_fail") |
4714 | + @mock.patch("actions.actions.action_get") |
4715 | + @mock.patch("subprocess.check_output") |
4716 | + def test_subprocess_error( |
4717 | + self, mock_check_output, mock_action_get, mock_action_fail |
4718 | + ): |
4719 | def raise_error(*args, **kwargs): |
4720 | - raise subprocess.CalledProcessError(1, ['bogus-cmd']) |
4721 | + raise subprocess.CalledProcessError(1, ["bogus-cmd"]) |
4722 | |
4723 | show_all_flag = False |
4724 | mock_action_get.return_value = show_all_flag |
4725 | @@ -128,4 +167,6 @@ class ShowSelTestCase(unittest.TestCase): |
4726 | show_sel() |
4727 | self.assertEqual( |
4728 | mock_action_fail.call_args[0][0], |
4729 | - "Action failed with Command '['bogus-cmd']' returned non-zero exit status 1.") |
4730 | + "Action failed with Command '['bogus-cmd']' " |
4731 | + "returned non-zero exit status 1.", |
4732 | + ) |
4733 | diff --git a/src/tests/unit/test_check_mdadm.py b/src/tests/unit/test_check_mdadm.py |
4734 | index ca20633..fe7e357 100644 |
4735 | --- a/src/tests/unit/test_check_mdadm.py |
4736 | +++ b/src/tests/unit/test_check_mdadm.py |
4737 | @@ -6,67 +6,56 @@ import unittest.mock as mock |
4738 | |
4739 | import nagios_plugin3 |
4740 | |
4741 | -sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) |
4742 | +sys.path.append(os.path.join(os.path.dirname(__file__), "lib")) |
4743 | from samples import get_sample # noqa: E402 |
4744 | |
4745 | -sys.path.append(os.path.join(os.path.dirname(__file__), '../../files/mdadm')) |
4746 | +sys.path.append(os.path.join(os.path.dirname(__file__), "../../files/mdadm")) |
4747 | import check_mdadm # noqa: E402 |
4748 | |
4749 | |
4750 | class TestCheckMdadm(unittest.TestCase): |
4751 | def setUp(self): |
4752 | - self.samples_dir = os.path.join( |
4753 | - os.getcwd(), |
4754 | - 'tests', |
4755 | - 'hw-health-samples' |
4756 | - ) |
4757 | + self.samples_dir = os.path.join(os.getcwd(), "tests", "hw-health-samples") |
4758 | |
4759 | def test_parse_output_crit(self): |
4760 | - check_mdadm.ARGS.input_file = get_sample('mdadm.output.nrpe.critical') |
4761 | - expected = 'CRITICAL: critical msg' |
4762 | + check_mdadm.ARGS.input_file = get_sample("mdadm.output.nrpe.critical") |
4763 | + expected = "CRITICAL: critical msg" |
4764 | with self.assertRaises(nagios_plugin3.CriticalError) as context: |
4765 | check_mdadm.parse_output() |
4766 | self.assertTrue(expected in str(context.exception)) |
4767 | |
4768 | def test_parse_output_warn(self): |
4769 | - check_mdadm.ARGS.input_file = get_sample('mdadm.output.nrpe.warning') |
4770 | - expected = 'WARNING: warning msg' |
4771 | + check_mdadm.ARGS.input_file = get_sample("mdadm.output.nrpe.warning") |
4772 | + expected = "WARNING: warning msg" |
4773 | with self.assertRaises(nagios_plugin3.WarnError) as context: |
4774 | check_mdadm.parse_output() |
4775 | self.assertTrue(expected in str(context.exception)) |
4776 | |
4777 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4778 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4779 | def test_parse_output_ok(self, mock_print): |
4780 | - check_mdadm.ARGS.input_file = get_sample('mdadm.output.nrpe.ok') |
4781 | + check_mdadm.ARGS.input_file = get_sample("mdadm.output.nrpe.ok") |
4782 | check_mdadm.parse_output() |
4783 | self.assertEqual( |
4784 | mock_print.getvalue(), |
4785 | - 'OK: /dev/md0 ok; /dev/md1 ok; /dev/md3 ok; /dev/md2 ok\n' |
4786 | + "OK: /dev/md0 ok; /dev/md1 ok; /dev/md3 ok; /dev/md2 ok\n", |
4787 | ) |
4788 | |
4789 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4790 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4791 | def test_parse_output_unknown_filenotfound(self, mock_print): |
4792 | - check_mdadm.ARGS.input_file = get_sample('thisfiledoesnotexist') |
4793 | - expected = 'UNKNOWN: file not found ({})'.format( |
4794 | - check_mdadm.ARGS.input_file) |
4795 | + check_mdadm.ARGS.input_file = get_sample("thisfiledoesnotexist") |
4796 | + expected = "UNKNOWN: file not found ({})".format(check_mdadm.ARGS.input_file) |
4797 | with self.assertRaises(nagios_plugin3.UnknownError) as context: |
4798 | check_mdadm.parse_output() |
4799 | self.assertTrue(expected in str(context.exception)) |
4800 | |
4801 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4802 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4803 | def test_parse_output_unknown1(self, mock_print): |
4804 | - check_mdadm.ARGS.input_file = get_sample('mdadm.output.nrpe.unknown.1') |
4805 | + check_mdadm.ARGS.input_file = get_sample("mdadm.output.nrpe.unknown.1") |
4806 | check_mdadm.parse_output() |
4807 | - self.assertEqual( |
4808 | - mock_print.getvalue(), |
4809 | - 'UNKNOWN: unknown msg\n' |
4810 | - ) |
4811 | + self.assertEqual(mock_print.getvalue(), "UNKNOWN: unknown msg\n") |
4812 | |
4813 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4814 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4815 | def test_parse_output_unknown2(self, mock_print): |
4816 | - check_mdadm.ARGS.input_file = get_sample('mdadm.output.nrpe.unknown.2') |
4817 | + check_mdadm.ARGS.input_file = get_sample("mdadm.output.nrpe.unknown.2") |
4818 | check_mdadm.parse_output() |
4819 | - self.assertEqual( |
4820 | - mock_print.getvalue(), |
4821 | - 'unknown msg2\n' |
4822 | - ) |
4823 | + self.assertEqual(mock_print.getvalue(), "unknown msg2\n") |
4824 | diff --git a/src/tests/unit/test_check_megacli.py b/src/tests/unit/test_check_megacli.py |
4825 | index 00b9f4e..a6e7501 100644 |
4826 | --- a/src/tests/unit/test_check_megacli.py |
4827 | +++ b/src/tests/unit/test_check_megacli.py |
4828 | @@ -6,42 +6,40 @@ import unittest.mock as mock |
4829 | |
4830 | import nagios_plugin3 |
4831 | |
4832 | -sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) |
4833 | +sys.path.append(os.path.join(os.path.dirname(__file__), "lib")) |
4834 | from samples import get_sample # noqa: E402 |
4835 | |
4836 | -sys.path.append(os.path.join(os.path.dirname(__file__), '../../files/megacli')) |
4837 | +sys.path.append(os.path.join(os.path.dirname(__file__), "../../files/megacli")) |
4838 | import check_megacli # noqa: E402 |
4839 | |
4840 | |
4841 | class TestCheckMegaCLI(unittest.TestCase): |
4842 | def setUp(self): |
4843 | - self.samples_dir = os.path.join( |
4844 | - os.getcwd(), |
4845 | - 'tests', |
4846 | - 'hw-health-samples' |
4847 | - ) |
4848 | + self.samples_dir = os.path.join(os.getcwd(), "tests", "hw-health-samples") |
4849 | |
4850 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4851 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4852 | def test_parse_output(self, mock_print): |
4853 | - check_megacli.ARGS.input_file = get_sample('megacli.output.1') |
4854 | + check_megacli.ARGS.input_file = get_sample("megacli.output.1") |
4855 | check_megacli.parse_output() |
4856 | actual = mock_print.getvalue() |
4857 | - expected = 'OK: Optimal, ldrives[1], pdrives[4]\n' |
4858 | + expected = "OK: Optimal, ldrives[1], pdrives[4]\n" |
4859 | self.assertEqual(actual, expected) |
4860 | |
4861 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4862 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4863 | def test_parse_output_critical_singledrive(self, mock_print): |
4864 | - check_megacli.ARGS.input_file = get_sample('megacli.output.nrpe.critical.1') |
4865 | - expected = 'CRITICAL: adapter(0):ld(0):state(Degraded)' |
4866 | + check_megacli.ARGS.input_file = get_sample("megacli.output.nrpe.critical.1") |
4867 | + expected = "CRITICAL: adapter(0):ld(0):state(Degraded)" |
4868 | with self.assertRaises(nagios_plugin3.CriticalError) as context: |
4869 | check_megacli.parse_output() |
4870 | self.assertEqual(expected, str(context.exception)) |
4871 | |
4872 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4873 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4874 | def test_parse_output_critical_multiple(self, mock_print): |
4875 | - check_megacli.ARGS.input_file = get_sample('megacli.output.nrpe.critical.2') |
4876 | - expected = ('CRITICAL: adapter(0):ld(0):state(Degraded);' |
4877 | - ' adapter(0):ld(4):state(Degraded)') |
4878 | + check_megacli.ARGS.input_file = get_sample("megacli.output.nrpe.critical.2") |
4879 | + expected = ( |
4880 | + "CRITICAL: adapter(0):ld(0):state(Degraded);" |
4881 | + " adapter(0):ld(4):state(Degraded)" |
4882 | + ) |
4883 | with self.assertRaises(nagios_plugin3.CriticalError) as context: |
4884 | check_megacli.parse_output() |
4885 | self.assertEqual(expected, str(context.exception)) |
4886 | diff --git a/src/tests/unit/test_check_nvme.py b/src/tests/unit/test_check_nvme.py |
4887 | index 097fd76..4218a85 100644 |
4888 | --- a/src/tests/unit/test_check_nvme.py |
4889 | +++ b/src/tests/unit/test_check_nvme.py |
4890 | @@ -4,21 +4,21 @@ import sys |
4891 | import unittest |
4892 | import unittest.mock as mock |
4893 | |
4894 | -sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) |
4895 | +sys.path.append(os.path.join(os.path.dirname(__file__), "lib")) |
4896 | from samples import get_sample # noqa: E402 |
4897 | |
4898 | -sys.path.append(os.path.join(os.path.dirname(__file__), '../../files/nvme')) |
4899 | +sys.path.append(os.path.join(os.path.dirname(__file__), "../../files/nvme")) |
4900 | import check_nvme # noqa: E402 |
4901 | |
4902 | |
4903 | class TestCheckNvme(unittest.TestCase): |
4904 | - @mock.patch('check_nvme.glob.glob') |
4905 | - @mock.patch('check_nvme.subprocess.check_output') |
4906 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4907 | + @mock.patch("check_nvme.glob.glob") |
4908 | + @mock.patch("check_nvme.subprocess.check_output") |
4909 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4910 | def test_parse_output(self, mock_print, mock_subprocess, mock_glob): |
4911 | - mock_glob.return_value = ['/dev/nvme0'] |
4912 | - input_file = get_sample('nvme.output.1') |
4913 | - with open(input_file, 'r') as fd: |
4914 | + mock_glob.return_value = ["/dev/nvme0"] |
4915 | + input_file = get_sample("nvme.output.1") |
4916 | + with open(input_file, "r") as fd: |
4917 | mock_subprocess.return_value = fd.read().encode() |
4918 | check_nvme.parse_output() |
4919 | expected = ( |
4920 | diff --git a/src/tests/unit/test_check_sas2ircu.py b/src/tests/unit/test_check_sas2ircu.py |
4921 | index cd5e854..e2053d7 100644 |
4922 | --- a/src/tests/unit/test_check_sas2ircu.py |
4923 | +++ b/src/tests/unit/test_check_sas2ircu.py |
4924 | @@ -4,18 +4,18 @@ import sys |
4925 | import unittest |
4926 | import unittest.mock as mock |
4927 | |
4928 | -sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) |
4929 | +sys.path.append(os.path.join(os.path.dirname(__file__), "lib")) |
4930 | from samples import get_sample # noqa: E402 |
4931 | |
4932 | -sys.path.append(os.path.join(os.path.dirname(__file__), '../../files/sas2ircu')) |
4933 | +sys.path.append(os.path.join(os.path.dirname(__file__), "../../files/sas2ircu")) |
4934 | import check_sas2ircu # noqa: E402 |
4935 | |
4936 | |
4937 | class TestCheckMegaCLI(unittest.TestCase): |
4938 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4939 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4940 | def test_parse_output(self, mock_print): |
4941 | - check_sas2ircu.ARGS.input_file = get_sample('sas2ircu.huawei.output.1') |
4942 | + check_sas2ircu.ARGS.input_file = get_sample("sas2ircu.huawei.output.1") |
4943 | check_sas2ircu.parse_output() |
4944 | actual = mock_print.getvalue() |
4945 | - expected = 'OK: Ready[1:0,1:1,1:2,1:3,1:4,1:5,1:6,1:7]\n' |
4946 | + expected = "OK: Ready[1:0,1:1,1:2,1:3,1:4,1:5,1:6,1:7]\n" |
4947 | self.assertEqual(actual, expected) |
4948 | diff --git a/src/tests/unit/test_check_sas3ircu.py b/src/tests/unit/test_check_sas3ircu.py |
4949 | index 1379369..bb79688 100644 |
4950 | --- a/src/tests/unit/test_check_sas3ircu.py |
4951 | +++ b/src/tests/unit/test_check_sas3ircu.py |
4952 | @@ -4,19 +4,19 @@ import sys |
4953 | import unittest |
4954 | import unittest.mock as mock |
4955 | |
4956 | -sys.path.append(os.path.join(os.path.dirname(__file__), 'lib')) |
4957 | +sys.path.append(os.path.join(os.path.dirname(__file__), "lib")) |
4958 | from samples import get_sample # noqa: E402 |
4959 | |
4960 | -sys.path.append(os.path.join(os.path.dirname(__file__), '../../files/sas3ircu')) |
4961 | +sys.path.append(os.path.join(os.path.dirname(__file__), "../../files/sas3ircu")) |
4962 | import check_sas3ircu # noqa: E402 |
4963 | |
4964 | |
4965 | class TestCheckMegaCLI(unittest.TestCase): |
4966 | - @mock.patch('sys.stdout', new_callable=io.StringIO) |
4967 | + @mock.patch("sys.stdout", new_callable=io.StringIO) |
4968 | def test_parse_output_ok(self, mock_print): |
4969 | - _filepath = get_sample('sas3ircu.supermicro.ok.output.1') |
4970 | + _filepath = get_sample("sas3ircu.supermicro.ok.output.1") |
4971 | data = check_sas3ircu.parse_output(_filepath) |
4972 | check_sas3ircu.eval_status(data) |
4973 | actual = mock_print.getvalue() |
4974 | - expected = 'OK: no errors\n' |
4975 | + expected = "OK: no errors\n" |
4976 | self.assertEqual(actual, expected) |
4977 | diff --git a/src/tests/unit/test_cron_hplog.py b/src/tests/unit/test_cron_hplog.py |
4978 | index 87c5034..b17904e 100644 |
4979 | --- a/src/tests/unit/test_cron_hplog.py |
4980 | +++ b/src/tests/unit/test_cron_hplog.py |
4981 | @@ -4,27 +4,30 @@ import unittest |
4982 | from argparse import Namespace |
4983 | from pathlib import Path |
4984 | |
4985 | -sys.path.append('files/hplog') |
4986 | +sys.path.append("files/hplog") |
4987 | import cron_hplog # noqa: E402 |
4988 | |
4989 | |
4990 | class TestCronHPlog(unittest.TestCase): |
4991 | def setUp(self): |
4992 | # Skip the v flag, it serves a different purpose |
4993 | - self.test_flags = {'t', 'f', 'p'} |
4994 | + self.test_flags = {"t", "f", "p"} |
4995 | |
4996 | def _get_no_error_sample(self, flag): |
4997 | - _filepath = os.path.join(os.getcwd(), 'tests', 'hw-health-samples', |
4998 | - 'hplog.{}.ewah.out'.format(flag)) |
4999 | + _filepath = os.path.join( |
5000 | + os.getcwd(), "tests", "hw-health-samples", "hplog.{}.ewah.out".format(flag) |
The diff has been truncated for viewing.
LGTM
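
The change set above is mechanical: Black normalises string quoting to double quotes, rewraps lines to its default 88-character limit, and explodes long argument lists with trailing commas. For anyone who wants to see that transformation in isolation, the sketch below drives Black programmatically on a small snippet written in the old style. It is illustrative only: the example source is hypothetical (not taken from this charm), and it assumes the black package is available in the current environment.

import black  # assumption: black is installed; it is not a dependency added by this proposal

# A tiny example in the pre-Black style seen throughout the removed lines:
# single quotes plus hanging indentation inside a call.
OLD_STYLE = (
    "nrpecfg_path = os.path.join(NRPECFG_DIR,\n"
    "                            'check_{}.cfg'.format(tool._shortname))\n"
)

# format_str() applies the same rewrite the black CLI performs on files:
# quote normalisation, 88-column wrapping, and trailing commas where a call
# gets exploded. black.Mode() (FileMode in older releases) supplies the defaults.
print(black.format_str(OLD_STYLE, mode=black.Mode()))

Running this prints the line collapsed into the double-quoted, single-line form that appears on the "+" side of the corresponding hunk above, which is a quick way to confirm a hunk is pure formatting with no behavioural change.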