#
# SPDX-License-Identifier: MIT
#
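# Runtime test that executes the ptests installed in the image via
# ptest-runner and folds their results into the test report.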

import os
import unittest
import pprint
import datetime

from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
from oeqa.utils.logparser import PtestParser

class PtestRunnerTest(OERuntimeTestCase):
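    """Run the ptests installed in the image via ptest-runner.

    Two wrappers select the right expectation: when PTEST_EXPECT_FAILURE is
    set the run is marked @expectedFailure, otherwise ptests are required to
    pass.
    """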

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    @unittest.expectedFailure
    def test_ptestrunner_expectfail(self):
        if not self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests with @expectedFailure as ptests are required to pass')
        self.do_ptestrunner()

    @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    @OEHasPackage(['ptest-runner'])
    def test_ptestrunner_expectsuccess(self):
        if self.td.get('PTEST_EXPECT_FAILURE'):
            self.skipTest('Cannot run ptests without @expectedFailure as ptests are expected to fail')
        self.do_ptestrunner()

    def do_ptestrunner(self):
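        """Run ptest-runner on the target, save its log under TEST_LOG_DIR,
        parse the per-section results, and fail if any ptest failed."""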
        status, output = self.target.run('which ptest-runner', 0)
        if status != 0:
            self.skipTest("No -ptest packages are installed in the image")

        test_log_dir = self.td.get('TEST_LOG_DIR', '')
        # TEST_LOG_DIR may be empty when testimage is added after
        # testdata.json is generated.
        if not test_log_dir:
            test_log_dir = os.path.join(self.td.get('WORKDIR', ''), 'testimage')
        # Make the test output path absolute; otherwise the output would be
        # created relative to the current directory
        if not os.path.isabs(test_log_dir):
            test_log_dir = os.path.join(self.td.get('TOPDIR', ''), test_log_dir)
        # Don't use self.td.get('DATETIME'): it comes from testdata.json, may
        # be stale, and can cause "File exists" errors on re-runs.
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
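        # Each run writes into its own timestamped directory; 'ptest_log' is
        # maintained as a symlink to the latest run (updated further below).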
        ptest_log_dir_link = os.path.join(test_log_dir, 'ptest_log')
        ptest_log_dir = '%s.%s' % (ptest_log_dir_link, timestamp)
        ptest_runner_log = os.path.join(ptest_log_dir, 'ptest-runner.log')

        libdir = self.td.get('libdir', '')
        ptest_dirs = [ '/usr/lib' ]
        if libdir not in ptest_dirs:
            ptest_dirs.append(libdir)
        status, output = self.target.run('ptest-runner -d "{}"'.format(' '.join(ptest_dirs)), 0)
        os.makedirs(ptest_log_dir)
        with open(ptest_runner_log, 'w') as f:
            f.write(output)

        # A non-zero status is expected when individual ptests fail; only 127
        # (ptest-runner not found or not executable) is a hard error.
        self.assertNotEqual(status, 127, msg="Cannot execute ptest-runner!")

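        # Attach the raw ptest-runner output to the shared test-context
        # results so it is preserved in the final report.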
        if not hasattr(self.tc, "extraresults"):
            self.tc.extraresults = {}
        extras = self.tc.extraresults
        extras['ptestresult.rawlogs'] = {'log': output}

        # Parse and save results
        parser = PtestParser()
        results, sections = parser.parse(ptest_runner_log)
        parser.results_as_files(ptest_log_dir)
        if os.path.exists(ptest_log_dir_link):
            # Remove the old link to create a new one
            os.remove(ptest_log_dir_link)
        os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)

        extras['ptestresult.sections'] = sections

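        # Sanitize test names: parentheses become underscores and runs of
        # whitespace collapse to single underscores, giving stable result keys.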
        trans = str.maketrans("()", "__")
        for section in results:
            for test in results[section]:
                result = results[section][test]
                testname = "ptestresult." + (section or "No-section") + "." + "_".join(test.translate(trans).split())
                extras[testname] = {'status': result}

        failed_tests = {}

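        # Sections that recorded an 'exitcode' ended abnormally; include their
        # full logs in the failure message.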
        for section in sections:
            if 'exitcode' in sections[section]:
                failed_tests[section] = sections[section]["log"]

        for section in results:
            failed_testcases = [ "_".join(test.translate(trans).split()) for test in results[section] if results[section][test] == 'FAILED' ]
            if failed_testcases:
                failed_tests[section] = failed_testcases

        failmsg = ""
        status, output = self.target.run('dmesg | grep "Killed process"', 0)
        if output:
            failmsg = "ERROR: Processes were killed by the OOM Killer:\n%s\n" % output

        if failed_tests:
            failmsg += "Failed ptests:\n%s" % pprint.pformat(failed_tests)

        if failmsg:
            self.logger.warning("There were failing ptests.")
            self.fail(failmsg)