[oe-commits] [openembedded-core] 30/60: oeqa/runtime/ptest: Inject results+logs into stored json results file

git at git.openembedded.org
Tue Nov 6 11:58:30 UTC 2018


This is an automated email from the git hooks/post-receive script.

rpurdie pushed a commit to branch master-next
in repository openembedded-core.

commit 346dfd112b0c28d00bf17c21b55bbbf30a3e3f64
Author: Richard Purdie <richard.purdie at linuxfoundation.org>
AuthorDate: Fri Nov 2 13:13:43 2018 +0000

    oeqa/runtime/ptest: Inject results+logs into stored json results file
    
    This allows the ptest results from ptest-runner, run in an image, to be
    transferred over to the resulting json results output.
    
    Each test is given a pass/skip/fail result so individual results can be
    monitored, and the raw log output from ptest-runner is also dumped into the
    results json file, which makes after-the-fact debugging much easier.
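
    For illustration, the extraresults dictionary populated by this change ends
    up shaped roughly as follows once dumped into the json results file (the
    section and test names here are invented; only the key layout and status
    values come from the code below):

        extraresults = {
            'ptestresult.rawlogs': {'log': '<full ptest-runner output>'},
            'ptestresult.bash.run-builtins': {'status': 'PASSED'},
            'ptestresult.glib-2.0.gvariant': {'status': 'FAILED'},
        }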
    
    Currently the log output is not split up per test, but that would make a
    good future enhancement.
    
    I attempted to implement this as python subTests; however, it failed as the
    output was too confusing: subTests don't support any kind of log output
    handling, subTest successes aren't logged, and it was making things far more
    complex than they needed to be.
    
    We mark ptest-runner as "EXPECTEDFAILURE" since it's unlikely every ptest
    will pass currently and we don't want that to fail the whole image test run.
    It's assumed there would be later analysis of the json output to determine
    regressions. We do have to change the test runner code so that
    'unexpectedsuccess' is not treated as a failure.
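
    As a minimal standalone sketch (not part of the change) of why the override
    is needed: since Python 3.4, unittest's stock TestResult treats an
    unexpected success as making the run unsuccessful, so marking
    test_ptestrunner as an expected failure would otherwise fail a fully
    passing image test.

        import unittest

        class Demo(unittest.TestCase):
            @unittest.expectedFailure
            def test_marked_expected_failure(self):
                pass  # passes, so it is recorded as an "unexpected success"

        result = unittest.TestResult()
        unittest.TestLoader().loadTestsFromTestCase(Demo).run(result)
        print(len(result.unexpectedSuccesses))  # 1
        print(result.wasSuccessful())           # False with the stock TestResult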
    
    Also, the test names are manipulated to remove spaces and brackets, with
    "_" used as a replacement and any duplicate occurrences collapsed.
    
    Signed-off-by: Richard Purdie <richard.purdie at linuxfoundation.org>
---
 meta/lib/oeqa/core/runner.py         |  8 ++++++++
 meta/lib/oeqa/runtime/cases/ptest.py | 21 +++++++++++++++++++--
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/meta/lib/oeqa/core/runner.py b/meta/lib/oeqa/core/runner.py
index 67756c3..034f223 100644
--- a/meta/lib/oeqa/core/runner.py
+++ b/meta/lib/oeqa/core/runner.py
@@ -122,7 +122,11 @@ class OETestResult(_TestResult):
 
     def logDetails(self, json_file_dir=None, configuration=None, result_id=None):
         self.tc.logger.info("RESULTS:")
+
         result = {}
+        if hasattr(self.tc, "extraresults"):
+            result = self.tc.extraresults
+
         for case_name in self.tc._registry['cases']:
             case = self.tc._registry['cases'][case_name]
 
@@ -148,6 +152,10 @@ class OETestResult(_TestResult):
             tresultjsonhelper = OETestResultJSONHelper()
             tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)
 
+    def wasSuccessful(self):
+        # Override as unexpected successes aren't failures for us
+        return (len(self.failures) == len(self.errors) == 0)
+
 class OEListTestsResult(object):
     def wasSuccessful(self):
         return True
diff --git a/meta/lib/oeqa/runtime/cases/ptest.py b/meta/lib/oeqa/runtime/cases/ptest.py
index f60a433..77ae7b6 100644
--- a/meta/lib/oeqa/runtime/cases/ptest.py
+++ b/meta/lib/oeqa/runtime/cases/ptest.py
@@ -1,3 +1,6 @@
+import unittest
+import pprint
+
 from oeqa.runtime.case import OERuntimeTestCase
 from oeqa.core.decorator.depends import OETestDepends
 from oeqa.core.decorator.oeid import OETestID
@@ -49,6 +52,7 @@ class PtestRunnerTest(OERuntimeTestCase):
     @OETestID(1600)
     @skipIfNotFeature('ptest', 'Test requires ptest to be in DISTRO_FEATURES')
     @OETestDepends(['ssh.SSHTest.test_ssh'])
+    @unittest.expectedFailure
     def test_ptestrunner(self):
         status, output = self.target.run('which ptest-runner', 0)
         if status != 0:
@@ -76,6 +80,11 @@ class PtestRunnerTest(OERuntimeTestCase):
         # status != 0 is OK since some ptest tests may fail
         self.assertTrue(status != 127, msg="Cannot execute ptest-runner!")
 
+        if not hasattr(self.tc, "extraresults"):
+            self.tc.extraresults = {}
+        extras = self.tc.extraresults
+        extras['ptestresult.rawlogs'] = {'log': output}
+
         # Parse and save results
         parse_result = self.parse_ptest(ptest_runner_log)
         parse_result.log_as_files(ptest_log_dir, test_status = ['pass','fail', 'skip'])
@@ -84,10 +93,18 @@ class PtestRunnerTest(OERuntimeTestCase):
             os.remove(ptest_log_dir_link)
         os.symlink(os.path.basename(ptest_log_dir), ptest_log_dir_link)
 
+        trans = str.maketrans("()", "__")
+        resmap = {'pass': 'PASSED', 'skip': 'SKIPPED', 'fail': 'FAILED'}
+        for section in parse_result.result_dict:
+            for test, result in parse_result.result_dict[section]:
+                testname = "ptestresult." + section + "." + "_".join(test.translate(trans).split())
+                extras[testname] = {'status': resmap[result]}
+
         failed_tests = {}
         for section in parse_result.result_dict:
-            failed_testcases = [ test for test, result in parse_result.result_dict[section] if result == 'fail' ]
+            failed_testcases = [ "_".join(test.translate(trans).split()) for test, result in parse_result.result_dict[section] if result == 'fail' ]
             if failed_testcases:
                 failed_tests[section] = failed_testcases
 
-        self.assertFalse(failed_tests, msg = "Failed ptests: %s" %(str(failed_tests)))
+        if failed_tests:
+            self.fail("Failed ptests:\n%s" % pprint.pformat(failed_tests))
