[oe-commits] [openembedded-core] 15/24: resulttool: Add ptest report
git at git.openembedded.org
Wed Feb 20 11:38:20 UTC 2019
This is an automated email from the git hooks/post-receive script.
rpurdie pushed a commit to branch master-next
in repository openembedded-core.
commit 5bf0d1a566b90f04d1da9dd6c3a4b892c3182bda
Author: Richard Purdie <richard.purdie at linuxfoundation.org>
AuthorDate: Sun Feb 17 17:23:15 2019 +0000
resulttool: Add ptest report
Signed-off-by: Richard Purdie <richard.purdie at linuxfoundation.org>
---
scripts/lib/resulttool/report.py | 51 ++++++++++++++++++----
scripts/lib/resulttool/resultutils.py | 8 ++--
.../resulttool/template/test_report_full_text.txt | 16 +++++++
3 files changed, 64 insertions(+), 11 deletions(-)
diff --git a/scripts/lib/resulttool/report.py b/scripts/lib/resulttool/report.py
index bdb49ce..862ac84 100644
--- a/scripts/lib/resulttool/report.py
+++ b/scripts/lib/resulttool/report.py
@@ -19,20 +19,48 @@ from resulttool.resultutils import checkout_git_dir
 import resulttool.resultutils as resultutils
 
 class ResultsTextReport(object):
+    def __init__(self):
+        self.ptests = {}
+        self.result_types = {'passed': ['PASSED', 'passed'],
+                             'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
+                             'skipped': ['SKIPPED', 'skipped']}
+
+
+    def handle_ptest_result(self, k, status, result):
+        if k == 'ptestresult.sections':
+            return
+        _, suite, test = k.split(".", 2)
+        # Handle 'glib-2.0'
+        if suite not in result['ptestresult.sections']:
+            try:
+                _, suite, suite1, test = k.split(".", 3)
+                if suite + "." + suite1 in result['ptestresult.sections']:
+                    suite = suite + "." + suite1
+            except ValueError:
+                pass
+        if suite not in self.ptests:
+            self.ptests[suite] = {'passed': 0, 'failed': 0, 'skipped': 0, 'duration' : '-', 'failed_testcases': []}
+        for tk in self.result_types:
+            if status in self.result_types[tk]:
+                self.ptests[suite][tk] += 1
+        if suite in result['ptestresult.sections']:
+            if 'duration' in result['ptestresult.sections'][suite]:
+                self.ptests[suite]['duration'] = result['ptestresult.sections'][suite]['duration']
+            if 'timeout' in result['ptestresult.sections'][suite]:
+                self.ptests[suite]['duration'] += " T"
 
     def get_aggregated_test_result(self, logger, testresult):
         test_count_report = {'passed': 0, 'failed': 0, 'skipped': 0, 'failed_testcases': []}
-        result_types = {'passed': ['PASSED', 'passed'],
-                        'failed': ['FAILED', 'failed', 'ERROR', 'error', 'UNKNOWN'],
-                        'skipped': ['SKIPPED', 'skipped']}
         result = testresult.get('result', [])
         for k in result:
             test_status = result[k].get('status', [])
-            for tk in result_types:
-                if test_status in result_types[tk]:
+            for tk in self.result_types:
+                if test_status in self.result_types[tk]:
                     test_count_report[tk] += 1
-            if test_status in result_types['failed']:
+            if test_status in self.result_types['failed']:
                 test_count_report['failed_testcases'].append(k)
+            if k.startswith("ptestresult."):
+                self.handle_ptest_result(k, test_status, result)
         return test_count_report
 
     def print_test_report(self, template_file_name, test_count_reports):
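
As background (not part of the patch itself): ptest result keys take the
form 'ptestresult.<suite>.<testcase>', and a suite name may itself contain
a dot, which is what the 'glib-2.0' comment above is handling. A minimal
standalone sketch of that disambiguation, using invented keys and section
data:

    # Illustrative only -- mirrors the key splitting in handle_ptest_result.
    sections = {'glib-2.0': {'duration': '12'}, 'bash': {'duration': '3'}}

    def split_suite(key):
        # Plain case: 'ptestresult.bash.tilde' -> ('bash', 'tilde')
        _, suite, test = key.split(".", 2)
        if suite not in sections:
            # Retry assuming the suite name contains a dot:
            # 'ptestresult.glib-2.0.gvariant' -> ('glib-2.0', 'gvariant')
            try:
                _, s1, s2, test = key.split(".", 3)
                if s1 + "." + s2 in sections:
                    suite = s1 + "." + s2
            except ValueError:
                pass
        return suite, test

    print(split_suite("ptestresult.bash.tilde"))         # ('bash', 'tilde')
    print(split_suite("ptestresult.glib-2.0.gvariant"))  # ('glib-2.0', 'gvariant')
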
@@ -42,25 +70,32 @@ class ResultsTextReport(object):
         env = Environment(loader=file_loader, trim_blocks=True)
         template = env.get_template(template_file_name)
         havefailed = False
+        haveptest = bool(self.ptests)
         reportvalues = []
         cols = ['passed', 'failed', 'skipped']
-        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' :0 }
+        maxlen = {'passed' : 0, 'failed' : 0, 'skipped' : 0, 'result_id': 0, 'testseries' : 0, 'ptest' : 0 }
         for line in test_count_reports:
             total_tested = line['passed'] + line['failed'] + line['skipped']
             vals = {}
             vals['result_id'] = line['result_id']
             vals['testseries'] = line['testseries']
             vals['sort'] = line['testseries'] + "_" + line['result_id']
+            vals['failed_testcases'] = line['failed_testcases']
             for k in cols:
                 vals[k] = "%d (%s%%)" % (line[k], format(line[k] / total_tested * 100, '.0f'))
             for k in maxlen:
-                if len(vals[k]) > maxlen[k]:
+                if k in vals and len(vals[k]) > maxlen[k]:
                     maxlen[k] = len(vals[k])
             reportvalues.append(vals)
             if line['failed_testcases']:
                 havefailed = True
+        for ptest in self.ptests:
+            if len(ptest) > maxlen['ptest']:
+                maxlen['ptest'] = len(ptest)
         output = template.render(reportvalues=reportvalues,
                                  havefailed=havefailed,
+                                 haveptest=haveptest,
+                                 ptests=self.ptests,
                                  maxlen=maxlen)
         print(output)
 
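
For reference (again, not part of the patch): the maxlen bookkeeping exists
so the Jinja2 template can pad every column to the widest value seen, via
ljust(). A small runnable sketch of the same alignment trick, using an
inline template and invented ptest data:

    from jinja2 import Environment

    # Illustrative only -- same ljust()-based alignment the report template uses.
    env = Environment(trim_blocks=True)
    template = env.from_string(
        "{{ 'Recipe'.ljust(maxlen['ptest']) }} | Passed\n"
        "{% for p in ptests %}\n"
        "{{ p.ljust(maxlen['ptest']) }} | {{ ptests[p]['passed'] }}\n"
        "{% endfor %}\n")

    ptests = {'bash': {'passed': 12}, 'glib-2.0': {'passed': 3050}}
    maxlen = {'ptest': max(len(name) for name in ptests)}
    print(template.render(ptests=ptests, maxlen=maxlen))
    # Recipe   | Passed
    # bash     | 12
    # glib-2.0 | 3050
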
diff --git a/scripts/lib/resulttool/resultutils.py b/scripts/lib/resulttool/resultutils.py
index 54f8dd1..464d693 100644
--- a/scripts/lib/resulttool/resultutils.py
+++ b/scripts/lib/resulttool/resultutils.py
@@ -63,9 +63,11 @@ def append_resultsdata(results, f, configmap=store_map):
         testpath = "/".join(data[res]["configuration"].get(i) for i in configmap[testtype])
         if testpath not in results:
             results[testpath] = {}
-        for i in ['ptestresult.rawlogs', 'ptestresult.sections']:
-            if i in data[res]['result']:
-                del data[res]['result'][i]
+        if 'ptestresult.rawlogs' in data[res]['result']:
+            del data[res]['result']['ptestresult.rawlogs']
+        if 'ptestresult.sections' in data[res]['result']:
+            for i in data[res]['result']['ptestresult.sections']:
+                del data[res]['result']['ptestresult.sections'][i]['log']
         results[testpath][res] = data[res]
 
 #
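
The resultutils.py change narrows what gets pruned when results files are
merged: before, 'ptestresult.rawlogs' and the entire 'ptestresult.sections'
entry were deleted; now only the raw logs and each section's 'log' field go,
so the per-suite durations the new report needs survive. A sketch of the
effect on invented data:

    # Illustrative only -- the pruning above applied to made-up result data.
    result = {
        'ptestresult.rawlogs': {'log': 'full combined console output'},
        'ptestresult.sections': {'bash': {'duration': '55', 'log': 'suite log'}},
        'ptestresult.bash.tilde': {'status': 'PASSED'},
    }

    if 'ptestresult.rawlogs' in result:
        del result['ptestresult.rawlogs']      # bulky raw log always dropped
    if 'ptestresult.sections' in result:
        for i in result['ptestresult.sections']:
            del result['ptestresult.sections'][i]['log']  # keep duration/timeout

    print(result)
    # {'ptestresult.sections': {'bash': {'duration': '55'}},
    #  'ptestresult.bash.tilde': {'status': 'PASSED'}}
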
diff --git a/scripts/lib/resulttool/template/test_report_full_text.txt b/scripts/lib/resulttool/template/test_report_full_text.txt
index 1c7484d..5081594 100644
--- a/scripts/lib/resulttool/template/test_report_full_text.txt
+++ b/scripts/lib/resulttool/template/test_report_full_text.txt
@@ -9,6 +9,22 @@ Test Result Status Summary (Counts/Percentages sorted by testseries, ID)
 {% endfor %}
 --------------------------------------------------------------------------------------------------------------
 
+{% if haveptest %}
+==============================================================================================================
+PTest Result Summary
+==============================================================================================================
+--------------------------------------------------------------------------------------------------------------
+{{ 'Recipe'.ljust(maxlen['ptest']) }} | {{ 'Passed'.ljust(maxlen['passed']) }} | {{ 'Failed'.ljust(maxlen['failed']) }} | {{ 'Skipped'.ljust(maxlen['skipped']) }} | {{ 'Time(s)'.ljust(10) }}
+--------------------------------------------------------------------------------------------------------------
+{% for ptest in ptests %}
+{{ ptest.ljust(maxlen['ptest']) }} | {{ (ptests[ptest]['passed']|string).ljust(maxlen['passed']) }} | {{ (ptests[ptest]['failed']|string).ljust(maxlen['failed']) }} | {{ (ptests[ptest]['skipped']|string).ljust(maxlen['skipped']) }} | {{ (ptests[ptest]['duration']|string) }}
+{% endfor %}
+--------------------------------------------------------------------------------------------------------------
+
+{% else %}
+There was no ptest data
+{% endif %}
+
 ==============================================================================================================
 Failed test cases (sorted by testseries, ID)
 ==============================================================================================================
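
Rendered, the new section comes out roughly like this (invented counts,
separator lines shortened; a trailing " T" in the Time(s) column marks a
suite that hit its timeout):

    PTest Result Summary
    ------------------------------------------------
    Recipe   | Passed | Failed | Skipped | Time(s)
    ------------------------------------------------
    bash     | 12     | 0      | 0       | 55
    glib-2.0 | 3050   | 7      | 12      | 1281 T
    ------------------------------------------------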