[dv] Improve sim.py error reporting

* Handle missing log files with error messages rather than terminating
  on unhandled exceptions
* Output potential failure causes from sim log file into regression log
* Alter per-test output to make it clearer which line corresponds to
  which test
* Only output [PASSED] or [FAILED] a single time per test
* Don't output [PASSED] where sim log is good but ISS comparison is not

commit 373212ee89
parent f291d1beb9
Greg Chadwick 2021-01-29 15:48:27 +00:00

2 changed files with 58 additions and 38 deletions
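
The first bullet above is the core robustness fix: a missing log file previously surfaced as an unhandled exception that killed the whole regression run. A minimal sketch of the pattern the diff applies, using an illustrative stand-in for the real log-processing functions:

    import sys

    def process_log(log_path):
        # Illustrative stand-in for the log-to-CSV converters changed below;
        # open() raises OSError when the log file is missing or unreadable.
        with open(log_path) as log:
            return log.read()

    try:
        process_log('missing.log')
    except (OSError, RuntimeError) as e:
        # Record the failure and keep the regression running, rather than
        # dying on an unhandled exception.
        sys.stderr.write('[FAILED]: Log processing failed: {}\n'.format(e))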


@@ -172,36 +172,50 @@ def process_imm(instr_name, pc, operands):
         return operands[0:idx + 1] + imm
 
 
-def check_ibex_uvm_log(uvm_log, core_name, test_name, report, write=True):
+def check_ibex_uvm_log(uvm_log):
     """Process Ibex UVM simulation log.
 
-    This function will be used when a test disables the normal post_compare
-    step. Process the UVM simulation log produced by the test to check for
-    correctness
+    Process the UVM simulation log produced by the test to check for
+    correctness, reports failure if an explicit error or failure is seen in the
+    log or there's no explicit pass.
 
     Args:
       uvm_log: the uvm simulation log
-      core_name: the name of the core
-      test_name: name of the test being checked
-      report: the output report file
-      write: enables writing to the log file. If equal to 'onfail',
-             write when the test fails. Otherwise (true, but not the
-             string 'onfail'), write unconditionally.
 
     Returns:
-      A boolean indicating whether the test passed or failed based on the
-      signature
+      A tuple of (passed, log_out).
+      `passed` indicates whether the test passed or failed based on the
+      log.
+      `log_out` a list of relevant lines from the log that may indicate the
+      source of the failure, if `passed` is true it will be empty.
     """
     passed = False
     failed = False
 
+    log_out = []
+
     with open(uvm_log, "r") as log:
+        # Simulation log has report summary at the end, which references
+        # 'UVM_ERROR' which can cause false failures. The summary appears after
+        # the test result so ignore any lines after the test result is seen for
+        # 'UVM_ERROR' checking. If the loop terminated immediately when a test
+        # result was seen it would miss issues where the test result is
+        # (erroneously) repeated multiple times with different results.
+        test_result_seen = False
+
         for line in log:
-            if 'UVM_ERROR' in line or 'UVM_FATAL' in line:
+            if ('UVM_ERROR' in line or 'UVM_FATAL' in line) \
+                    and not test_result_seen:
+                log_out.append(line)
                 failed = True
+
             if 'RISC-V UVM TEST PASSED' in line:
+                test_result_seen = True
                 passed = True
+
             if 'RISC-V UVM TEST FAILED' in line:
+                test_result_seen = True
                 failed = True
                 break
@@ -210,19 +224,7 @@ def check_ibex_uvm_log(uvm_log, core_name, test_name, report, write=True):
     if failed:
         passed = False
 
-    if write:
-        fd = open(report, "a+") if report else sys.stdout
-        fd.write("%s uvm log : %s\n" % (core_name, uvm_log))
-        if passed:
-            fd.write("%s : [PASSED]\n\n" % test_name)
-        elif failed:
-            fd.write("%s : [FAILED]\n\n" % test_name)
-        if report:
-            fd.close()
-
-    return passed
+    return (passed, log_out)
 
 
 def main():

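To make the new check_ibex_uvm_log contract concrete, here is a hedged usage sketch. It assumes the function can be imported from sim.py, and the log contents are invented; note the fabricated log ends with a UVM report summary that mentions 'UVM_ERROR', which is exactly the false-failure case the test_result_seen flag guards against:

    from sim import check_ibex_uvm_log   # assumes sim.py is importable

    # Invented UVM log: the report summary after the result line mentions
    # 'UVM_ERROR' even though no error occurred.
    with open('demo_uvm.log', 'w') as f:
        f.write('UVM_INFO @ 0: running test\n'
                'RISC-V UVM TEST PASSED\n'
                '--- UVM Report Summary ---\n'
                'UVM_ERROR :    0\n')

    passed, log_out = check_ibex_uvm_log('demo_uvm.log')
    assert passed          # the summary 'UVM_ERROR' line is ignored
    assert log_out == []   # failure lines are only collected on a failure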

@@ -368,6 +368,9 @@ def compare_test_run(test, idx, iss, output_dir, report):
     logging.info("Comparing %s/DUT sim result : %s" % (iss, elf))
 
     with open(report, 'a') as report_fd:
+        test_name_idx = '{}.{}'.format(test_name, idx)
+        test_underline = '-' * len(test_name_idx)
+        report_fd.write('\n{}\n{}\n'.format(test_name_idx, test_underline))
         report_fd.write('Test binary: {}\n'.format(elf))
 
     rtl_dir = os.path.join(output_dir, 'rtl_sim',
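These three added lines give each test its own underlined heading in the report, so it is clear which lines belong to which test. A quick sketch with an invented test name (not sim.py itself):

    test_name_idx = '{}.{}'.format('my_test', 0)
    print('\n{}\n{}'.format(test_name_idx, '-' * len(test_name_idx)))
    # Output (after a leading blank line):
    # my_test.0
    # ---------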
@@ -380,21 +383,30 @@ def compare_test_run(test, idx, iss, output_dir, report):
     try:
         # Convert the RTL log file to a trace CSV.
         process_ibex_sim_log(rtl_log, rtl_csv, 1)
-    except RuntimeError as e:
+    except (OSError, RuntimeError) as e:
         with open(report, 'a') as report_fd:
-            report_fd.write('Log processing failed: {}\n'.format(e))
+            report_fd.write('[FAILED]: Log processing failed: {}\n'.format(e))
 
         return False
 
     # Have a look at the UVM log. We should write out a message on failure or
     # if we are stopping at this point.
     no_post_compare = test.get('no_post_compare')
-    if not check_ibex_uvm_log(uvm_log, "ibex", test_name, report,
-                              write=(True if no_post_compare else 'onfail')):
-        return False
+    uvm_pass, uvm_log_lines = check_ibex_uvm_log(uvm_log)
 
-    if no_post_compare:
-        return True
+    with open(report, 'a') as report_fd:
+        report_fd.write('sim log: {}\n'.format(uvm_log))
+
+        if not uvm_pass:
+            for line in uvm_log_lines:
+                report_fd.write(line)
+            report_fd.write('[FAILED]: sim error seen\n')
+            return False
+
+        if no_post_compare:
+            report_fd.write('[PASSED]\n')
+            return True
 
     # There were no UVM errors. Process the log file from the ISS.
     iss_dir = os.path.join(output_dir, 'instr_gen', '{}_sim'.format(iss))
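The net effect of the report-writing changes above is that each test's verdict appears exactly once, with any suspect UVM log lines attached. An invented excerpt of a failing test's report block (paths and messages are illustrative only):

    my_test.0
    ---------
    Test binary: out/instr_gen/asm_test/my_test_0.o
    sim log: out/rtl_sim/my_test.0/sim.log
    UVM_ERROR @ 1234ns: unexpected trap
    [FAILED]: sim error seen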
@@ -402,11 +414,17 @@ def compare_test_run(test, idx, iss, output_dir, report):
     iss_log = os.path.join(iss_dir, '{}.{}.log'.format(test_name, idx))
     iss_csv = os.path.join(iss_dir, '{}.{}.csv'.format(test_name, idx))
 
-    if iss == "spike":
-        process_spike_sim_log(iss_log, iss_csv)
-    else:
-        assert iss == 'ovpsim'  # (should be checked by argparse)
-        process_ovpsim_sim_log(iss_log, iss_csv)
+    try:
+        if iss == "spike":
+            process_spike_sim_log(iss_log, iss_csv)
+        else:
+            assert iss == 'ovpsim'  # (should be checked by argparse)
+            process_ovpsim_sim_log(iss_log, iss_csv)
+    except (OSError, RuntimeError) as e:
+        with open(report, 'a') as report_fd:
+            report_fd.write('[FAILED]: Log processing failed: {}\n'.format(e))
+
+        return False
 
     compare_result = \
         compare_trace_csv(rtl_csv, iss_csv, "ibex", iss, report,
@@ -436,7 +454,7 @@ def compare(test_list, iss, output_dir):
         else:
             fails += 1
 
-    summary = "{} PASSED, {} FAILED".format(passes, fails)
+    summary = "\n{} PASSED, {} FAILED".format(passes, fails)
     with open(report, 'a') as report_fd:
         report_fd.write(summary + '\n')