LGTM in general, but I don't love the idea of hard-coding the product name info - I would prefer if we could somehow add this into cve_lib directly and look it up via the subprojects configuration. Also, I get one failure when running the end_to_end oval lib test on impish (the other oval_lib test suites all pass):

[amurray:~/ubuntu … ubuntu-cve-tracker] master(13)+* 14s ± PYTHONPATH=scripts pytest-3 test/test_oval_lib_end_to_end.py
============================= test session starts ==============================
platform linux -- Python 3.9.7, pytest-6.0.2, py-1.10.0, pluggy-0.13.0
rootdir: /home/amurray/ubuntu/git/ubuntu-cve-tracker
collected 4 items

test/test_oval_lib_end_to_end.py ..F.                                    [100%]

=================================== FAILURES ===================================
____ TestOvalLibEndToEnd.test_validate_entire_oci_oval[com.ubuntu.bionic.usn.oval.xml-bionic_20180814-bionic] ____

self = , dpkg_file = 'com.ubuntu.bionic.usn.oval.xml', manifest = 'bionic_20180814', release = 'bionic'

    @pytest.mark.parametrize("dpkg_file,manifest,release",
                             # The timestamped gold manifest oscap output has not been manually
                             # checked but it's nice to flag changes to the results of past USNs
                             [(util.bionic_dpkg_file, "bionic_20180814", "bionic"),
                              (util.trusty_dpkg_file, "trusty_20191107", "trusty")])
    def test_validate_entire_oci_oval(self, dpkg_file, manifest, release):
        """Coherence check of entire generated oci OVAL"""
>       util.create_validate_oci(dpkg_file, "{}_full".format(release),
                                 ["--usn-oval-release", release], manifest, release)

test/test_oval_lib_end_to_end.py:31:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

cls = , output_file = 'com.ubuntu.bionic.usn.oval.xml', new_filename = 'bionic_full', oscap_args = ['--usn-oval-release', 'bionic'], manifest = 'bionic_20180814', gold_file = 'bionic'

    @classmethod
    def create_validate_oci(cls, output_file, new_filename, oscap_args, manifest, gold_file):
        """Generate and validate oci and dpkg oval XML files for a single Ubuntu release"""
        new_file = cls.rel_test_path + new_filename
        # generate-oval creates files with identical names per-release
        # This setup allows running multiple tests on the same OVAL file
        # (without regenerating an identical oval) then creating a new OVAL
        # with an identical name and different content for following tests
        # so the OVAL is generated only as needed (less frequently than
        # every test run, more frequently than once per module)
        if not os.path.exists(new_file):
            dpkg_file = cls.rel_test_path + output_file
            oci_file = cls.rel_test_path + "oci." + output_file
            # Generate OVAL
            if sys.version_info[0] < 3:
                pycov = "python-coverage"
            else:
                pycov = "python3-coverage"
            subprocess.check_output([pycov, "run", "-a", "scripts/generate-oval",
                                     "--oci", "--usn-oval",
                                     "--output-dir={}".format(cls.rel_test_path)] + oscap_args)
            # Validate file structure
            subprocess.check_output(["oscap", "oval", "validate", dpkg_file],
                                    stderr=subprocess.STDOUT)
            subprocess.check_output(["oscap", "oval", "validate", oci_file],
                                    stderr=subprocess.STDOUT)
            os.rename(oci_file, new_file)
            cls.files_to_del.update([new_file, dpkg_file])
        # Test the oci XML file against a manifest
        manifest_dir = cls.rel_test_path + "manifests/" + manifest + "/"
        cmd_output = subprocess.check_output(["oscap", "oval", "eval", new_file],
                                             stderr=subprocess.STDOUT, cwd=manifest_dir)
        # Convert to str for py 3 compatibility
        cmd_output = cmd_output.decode("utf-8")
        # Compare output to expected
        with open(cls.rel_test_path + "gold_oci_results/" + gold_file) as f:
            gold_output = f.readlines()
        for line in gold_output:
>           assert(line in cmd_output)
E           AssertionError: assert 'Definition oval:com.ubuntu.bionic:def:38401000000: true\n' in 'Definition oval:com.ubuntu.bionic:def:48781000000: false\nDefinition oval:com.ubuntu.bionic:def:48771000000: true\nDe...com.ubuntu.bionic:def:36293000000: false\nDefinition oval:com.ubuntu.bionic:def:36272000000: false\nEvaluation done.\n'

test/test_utils.py:70: AssertionError
=========================== short test summary info ============================
FAILED test/test_oval_lib_end_to_end.py::TestOvalLibEndToEnd::test_validate_entire_oci_oval[com.ubuntu.bionic.usn.oval.xml-bionic_20180814-bionic] - AssertionError: assert 'Definition oval:com.ubuntu.bionic:def:38401000000: true\n' in ...
=================== 1 failed, 3 passed in 62.91s (0:01:02) ====================
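
On the subprojects idea, something along these lines is what I had in mind - purely a sketch, the "product_description" field, the helper name and the dict keys below are made up for illustration and are not existing cve_lib API:

# Hypothetical sketch: keep the product name next to the rest of the
# per-subproject metadata in cve_lib instead of hard-coding it in oval_lib.
# Keys and field names here are illustrative only.
subprojects = {
    "bionic": {
        # ... existing subproject fields would stay as-is ...
        "product_description": "Ubuntu 18.04 LTS",
    },
    "esm-infra/xenial": {
        "product_description": "Ubuntu 16.04 ESM",
    },
}

def get_product_description(release):
    """Look up the OVAL product name for a release/subproject."""
    try:
        return subprojects[release]["product_description"]
    except KeyError:
        raise ValueError("no product description configured for %s" % release)

oval_lib would then just call the lookup helper for the release it is generating, rather than carrying its own hard-coded table.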