diff options
author | Richard Purdie <richard.purdie@linuxfoundation.org> | 2018-07-09 15:20:34 +0000 |
---|---|---|
committer | Richard Purdie <richard.purdie@linuxfoundation.org> | 2018-07-16 16:44:26 +0100 |
commit | 326ababfd620ae5ea29bf486b9d68ba3d60cad30 (patch) | |
tree | 04e46acabadb37fe91a2d1e15f4e6a087eb905da /meta/lib/oeqa/core/runner.py | |
parent | 26e04b23ba1b6942aa7c7df478d41dfe7b73e6e0 (diff) | |
download | openembedded-core-contrib-326ababfd620ae5ea29bf486b9d68ba3d60cad30.tar.gz |
oeqa: Add selftest parallelisation support
This allows oe-selftest to take a -j option which specifies how much test
parallelisation to use. Currently this is "module" based with each module
being split and run in a separate build directory. Further splitting could
be done but this seems a good compromise between test setup and parallelism.
You need python-testtools and python-subunit installed to use this but only
when the -j option is specified.
See notes posted to the openembedded-architecture list for more details
about the design choices here.
Some of this functionality may make more sense in the oeqa core ultimately.
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Diffstat (limited to 'meta/lib/oeqa/core/runner.py')
-rw-r--r-- | meta/lib/oeqa/core/runner.py | 24 |
1 file changed, 20 insertions, 4 deletions
diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py index 219102c6b0..6adbe3827b 100644 --- a/meta/lib/oeqa/core/runner.py +++ b/meta/lib/oeqa/core/runner.py @@ -43,11 +43,17 @@ class OETestResult(_TestResult): super(OETestResult, self).__init__(*args, **kwargs) self.successes = [] + self.starttime = {} + self.endtime = {} + self.progressinfo = {} self.tc = tc self._tc_map_results() def startTest(self, test): + # May have been set by concurrencytest + if test.id() not in self.starttime: + self.starttime[test.id()] = time.time() super(OETestResult, self).startTest(test) def _tc_map_results(self): @@ -57,6 +63,12 @@ class OETestResult(_TestResult): self.tc._results['expectedFailures'] = self.expectedFailures self.tc._results['successes'] = self.successes + def stopTest(self, test): + self.endtime[test.id()] = time.time() + super(OETestResult, self).stopTest(test) + if test.id() in self.progressinfo: + print(self.progressinfo[test.id()]) + def logSummary(self, component, context_msg=''): elapsed_time = self.tc._run_end_time - self.tc._run_start_time self.tc.logger.info("SUMMARY:") @@ -141,12 +153,16 @@ class OETestResult(_TestResult): if hasattr(d, 'oeid'): oeid = d.oeid + t = "" + if case.id() in self.starttime and case.id() in self.endtime: + t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)" + if fail: - self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(), - oeid, desc)) + self.tc.logger.info("RESULTS - %s - Testcase %s: %s%s" % (case.id(), + oeid, desc, t)) else: - self.tc.logger.info("RESULTS - %s - Testcase %s: %s" % (case.id(), - oeid, 'UNKNOWN')) + self.tc.logger.info("RESULTS - %s - Testcase %s: %s%s" % (case.id(), + oeid, 'UNKNOWN', t)) class OEListTestsResult(object): def wasSuccessful(self): |