Update code from upstream repository
https://github.com/lowRISC/opentitan to revision
d1be61ba88a145e882df4e7c7a47f78bcf2371f8

* [testplanner] Replace IP milestone terminology with development
  stage (Michael Schaffner)
* [doc] Replace IP milestone terminology with development stage
  (Michael Schaffner)
* [prim] Fix missing case from prim_reg_cdc_arb assert (Timothy Chen)
* [tools/dv] Remove set_fsm_reset_scoring coverage directive from
  common.ccf (Steve Nelson)
* [dv] Exclude FSM transitions that can only happen on reset (Weicai
  Yang)
* [chip dv] Fixes for chip level failures (Srikrishna Iyer)
* [dv, mem_bkdr_util] Add system base addr (Srikrishna Iyer)
* Switch to run-time options instead (Timothy Chen)
* [dvsim] Fix coverage upload URL (Michael Schaffner)
* [prim] Tweak code slightly to avoid UNR entries (Timothy Chen)
* [prim] Add () to s_eventually (Timothy Chen)
* [dvsim] Add python workaround for shutil (Michael Schaffner)
* [dvsim] Make sure odir is of type Path (Michael Schaffner)
* [dvsim] Fix bug causing error in existing odirs (Canberk Topal)
* [prim] More refactoring to remove UNR generation (Timothy Chen)
* [dvsim] Fix flake8 lint warnings (Michael Schaffner)
* [dvsim] Align local and server path structure (Michael Schaffner)
* [dvsim] Remove support for email report (Michael Schaffner)
* [dvsim/doc] Place summary results into separate hierarchy (Michael
  Schaffner)
* [dvsim/utils] Fix a typo (Michael Schaffner)
* [dvsim] Default report folder name to 'latest' (Michael Schaffner)
* [dvsim] Use relative links on summary page (Michael Schaffner)
* [xcelium warning] Cleanup unexpected semicolon warning (Srikrishna
  Iyer)
* [dv/mem_bkdr] Fix digest update (Timothy Chen)
* [dvsim] Handle same test added twice via `-i` (Srikrishna Iyer)
* [lint] Fix shellcheck errors in hw (Miles Dai)
* [sw/silicon_creator] Rename mask_rom to rom (Alphan Ulusoy)
* [spi_device/dv] Fix payload check (Weicai Yang)
* [dvsim] Ensure ELF file with proper ext gets copied to `run_dir`
  (Timothy Trippel)
* [prim] Assertion update for prim_reg_cdc (Timothy Chen)
* [prim_lfsr dv] Designate a primary build (Srikrishna Iyer)
* [dv] Increase stress tests run time limit to 3h (Weicai Yang)
* [dvsim] Fix run timeout override in hjson (Srikrishna Iyer)
* [dv/cov] Exclude some prim modules from detailed coverage (Guillermo
  Maturana)
* [prim,dv] Reg CDC hardware request fix (Canberk Topal)
* [prim] Add more lint waivers (Michael Schaffner)
* [dvsim] Add support for specifying primary_build_mode (Srikrishna
  Iyer)
* [dv] Add some VCS coverage options (Srikrishna Iyer)
* feat(kmac): Add FI attack protection on packer pos (Eunchan Kim)
* [dv] small fix at mem_model (Weicai Yang)
* [dvsim] enable manufacturer tests to run in DV sim (Timothy Trippel)
* [dvsim] Fix errors due to test duplication (Srikrishna Iyer)
* [pad_wrapper] Do not model keeper (Michael Schaffner)
* [lint] Fix several SAME_NAME_TYPE errors (Michael Schaffner)
* [flash_ctrl] Lint fix (Michael Schaffner)
* [dvsim] Include error message context (Srikrishna Iyer)

Signed-off-by: Michael Schaffner <msf@google.com>
Michael Schaffner 2022-08-24 13:03:04 -07:00
parent 7b4b780c7e
commit bbde00d174
65 changed files with 564 additions and 409 deletions


@ -9,6 +9,6 @@
upstream:
{
url: https://github.com/lowRISC/opentitan
rev: f9e6675507fdd81e0b0dd3481c0a4bca634f322d
rev: d1be61ba88a145e882df4e7c7a47f78bcf2371f8
}
}


@ -40,6 +40,9 @@ class mem_bkdr_util extends uvm_object;
protected uint32_t addr_width;
protected uint32_t byte_addr_width;
// Address range of this memory in the system address map.
protected addr_range_t addr_range;
// Indicates the maximum number of errors that can be injected.
//
// If parity is enabled, this limit applies to a single byte in the memory width. We cannot inject
@ -61,7 +64,7 @@ class mem_bkdr_util extends uvm_object;
// package.
function new(string name = "", string path, int unsigned depth,
longint unsigned n_bits, err_detection_e err_detection_scheme,
int extra_bits_per_subword = 0);
int extra_bits_per_subword = 0, int unsigned system_base_addr = 0);
bit res;
super.new(name);
@ -111,6 +114,8 @@ class mem_bkdr_util extends uvm_object;
addr_lsb = $clog2(bytes_per_word);
addr_width = $clog2(depth);
byte_addr_width = addr_width + addr_lsb;
addr_range.start_addr = system_base_addr;
addr_range.end_addr = system_base_addr + size_bytes - 1;
max_errors = width;
if (name == "") set_name({path, "::mem_bkdr_util"});
`uvm_info(`gfn, this.convert2string(), UVM_MEDIUM)
@ -129,7 +134,9 @@ class mem_bkdr_util extends uvm_object;
$sformatf("addr_lsb = %0d\n", addr_lsb),
$sformatf("addr_width = %0d\n", addr_width),
$sformatf("byte_addr_width = %0d\n", byte_addr_width),
$sformatf("max_errors = %0d\n", max_errors)};
$sformatf("max_errors = %0d\n", max_errors),
$sformatf("addr_range.start_addr = 0x%0h\n", addr_range.start_addr),
$sformatf("addr_range.end_addr = 0x%0h\n", addr_range.end_addr)};
endfunction
function string get_path();
@ -176,6 +183,10 @@ class mem_bkdr_util extends uvm_object;
return byte_addr_width;
endfunction
function bit is_valid_addr(int unsigned system_addr);
return system_addr inside {[addr_range.start_addr:addr_range.end_addr]};
endfunction
function string get_file();
return file;
endfunction
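
The new system_base_addr argument reduces to an inclusive range check. A minimal
Python sketch of the same bookkeeping (the depth, word size and base address
below are hypothetical):

  class MemBkdrUtilModel:
      # Python model of the addr_range / is_valid_addr logic added above.
      def __init__(self, depth, bytes_per_word, system_base_addr=0):
          size_bytes = depth * bytes_per_word
          self.start_addr = system_base_addr
          self.end_addr = system_base_addr + size_bytes - 1

      def is_valid_addr(self, system_addr):
          # Mirrors: system_addr inside {[addr_range.start_addr : addr_range.end_addr]}
          return self.start_addr <= system_addr <= self.end_addr

  mem = MemBkdrUtilModel(depth=1024, bytes_per_word=4, system_base_addr=0x1000_0000)
  assert mem.is_valid_addr(0x1000_0FFF) and not mem.is_valid_addr(0x1000_1000)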


@ -100,7 +100,7 @@ virtual function void rom_encrypt_write32_integ(logic [bus_params_pkg::BUS_AW-1:
// flip some bits to inject integrity fault
integ_data ^= flip_bits;
// Calculate the scrambled data
wdata_arr = {<<{integ_data}};
wdata_arr = sram_scrambler_pkg::encrypt_sram_data(
@ -155,17 +155,24 @@ virtual function void update_rom_digest(logic [SRAM_KEY_WIDTH-1:0] key,
int digest_start_addr = kmac_data_bytes;
bit scramble_data = 0; // digest and kmac data aren't scrambled
// kmac data is twice of kmac_data_bytes as we use 64 bit bus to send 32 bit data + 7 intg
kmac_data_arr = new[kmac_data_bytes * 2];
// Every 4 bytes of data are transferred as 5 bytes
int xfer_bytes = kmac_data_bytes * 5 / 4;
kmac_data_arr = new[xfer_bytes];
`uvm_info(`gfn, $sformatf("Actual bytes: %d, xfer'd: %d", kmac_data_bytes, xfer_bytes), UVM_DEBUG)
for (int i = 0; i < kmac_data_bytes; i += 4) begin
bit [63:0] data64;
bit [39:0] data40;
// it returns 39 bits, including integrity. and the 39 bits data will be sent to 64 bits bus to
// the kmac
data64 = 64'(rom_encrypt_read32(i, key, nonce, scramble_data));
for (int j = 0; j < 8; j++) begin
kmac_data_arr[i * 2 + j] = data64[j * 8 +: 8];
// it returns 39 bits, including integrity, and the 39-bit data is sent over a 40-bit bus to
// the kmac. The kmac bus has byte strobes that are used to indicate 5 valid bytes instead of the
// full 8.
data40 = 40'(rom_encrypt_read32(i, key, nonce, scramble_data));
for (int j = 0; j < 5; j++) begin
// At byte position 0, we want bytes 0, 1, 2, 3, 4
// At byte position 4, we want bytes 5, 6, 7, 8, 9
// At byte position 8, we want bytes 10, 11, 12, 13, 14
int idx = i + (i / 4) + j;
kmac_data_arr[idx] = data40[j * 8 +: 8];
end
end
digestpp_dpi_pkg::c_dpi_cshake256(kmac_data_arr, "", "ROM_CTRL", kmac_data_arr.size,
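
The index arithmetic above packs each 4-byte word, read back as 40 bits with
integrity, into 5 consecutive bytes of the kmac stream. A small Python sketch of
just that mapping (the kmac_data_bytes value is arbitrary):

  kmac_data_bytes = 12                      # hypothetical size, a multiple of 4
  xfer_bytes = kmac_data_bytes * 5 // 4     # each 4 bytes travel as 5 bytes
  for i in range(0, kmac_data_bytes, 4):
      idxs = [i + (i // 4) + j for j in range(5)]
      print(i, idxs)
  # i=0 -> [0..4], i=4 -> [5..9], i=8 -> [10..14]; all xfer_bytes slots are filled.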


@ -5,7 +5,7 @@
package mem_bkdr_util_pkg;
// dep packages
import bus_params_pkg::BUS_AW;
import dv_utils_pkg::uint32_t;
import dv_utils_pkg::uint32_t, dv_utils_pkg::addr_range_t;
import lc_ctrl_state_pkg::*;
import otp_ctrl_part_pkg::*;
import otp_ctrl_reg_pkg::*;


@ -7,11 +7,11 @@ class mem_model #(int AddrWidth = bus_params_pkg::BUS_AW,
localparam int MaskWidth = DataWidth / 8;
typedef bit [AddrWidth-1:0] mem_addr_t;
typedef bit [DataWidth-1:0] mem_data_t;
typedef bit [MaskWidth-1:0] mem_mask_t;
typedef logic [AddrWidth-1:0] mem_addr_t;
typedef logic [DataWidth-1:0] mem_data_t;
typedef logic [MaskWidth-1:0] mem_mask_t;
bit [7:0] system_memory[mem_addr_t];
logic [7:0] system_memory[mem_addr_t];
`uvm_object_param_utils(mem_model#(AddrWidth, DataWidth))
@ -37,15 +37,16 @@ class mem_model #(int AddrWidth = bus_params_pkg::BUS_AW,
return data;
endfunction
function void write_byte(mem_addr_t addr, bit [7:0] data);
function void write_byte(mem_addr_t addr, logic [7:0] data);
`uvm_info(`gfn, $sformatf("Write Mem : Addr[0x%0h], Data[0x%0h]", addr, data), UVM_HIGH)
system_memory[addr] = data;
endfunction
function void compare_byte(mem_addr_t addr, bit [7:0] act_data);
function void compare_byte(mem_addr_t addr, logic [7:0] act_data);
`uvm_info(`gfn, $sformatf("Compare Mem : Addr[0x%0h], Act Data[0x%0h], Exp Data[0x%0h]",
addr, act_data, system_memory[addr]), UVM_HIGH)
`DV_CHECK_EQ(act_data, system_memory[addr], $sformatf("addr 0x%0h read out mismatch", addr))
addr, act_data, system_memory[addr]), UVM_MEDIUM)
`DV_CHECK_CASE_EQ(act_data, system_memory[addr],
$sformatf("addr 0x%0h read out mismatch", addr))
endfunction
function void write(input mem_addr_t addr, mem_data_t data, mem_mask_t mask = '1);
@ -77,8 +78,12 @@ class mem_model #(int AddrWidth = bus_params_pkg::BUS_AW,
for (int i = 0; i < DataWidth / 8; i++) begin
mem_addr_t byte_addr = addr + i;
byte_data = act_data[7:0];
if (mask[0] && (!compare_exist_addr_only || addr_exists(byte_addr))) begin
compare_byte(byte_addr, byte_data);
if (mask[0]) begin
if (addr_exists(byte_addr)) begin
compare_byte(byte_addr, byte_data);
end else if (!compare_exist_addr_only) begin
`uvm_error(`gfn, $sformatf("address 0x%0x does not exist", byte_addr))
end
end else begin
// Nothing to do here: since this byte wasn't selected by the mask, there are no
// requirements about what data came back.
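
The revised policy above is: masked-off bytes are ignored, bytes at known
addresses are compared, and an unknown address is an error only when
compare_exist_addr_only is not set. A Python sketch of that decision tree (names
mirror the SystemVerilog; the data structures are illustrative):

  def compare(system_memory, addr, act_bytes, mask, compare_exist_addr_only=True):
      for i, act in enumerate(act_bytes):
          byte_addr = addr + i
          if not mask[i]:
              continue  # byte not selected by the mask: no requirement on the data
          if byte_addr in system_memory:
              assert act == system_memory[byte_addr], f"addr {byte_addr:#x} mismatch"
          elif not compare_exist_addr_only:
              raise KeyError(f"address {byte_addr:#x} does not exist")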


@ -29,7 +29,7 @@ class push_pull_indefinite_host_seq #(parameter int HostDataWidth = 32,
task wait_for_items_processed(int n);
wait(items_processed >= n);
endtask;
endtask
virtual task body();
items_processed = 0;


@ -16,7 +16,7 @@
// Default directory structure for the output
build_dir: "{scratch_path}/{build_mode}"
run_dir_name: "{index}.{test}"
run_dir: "{scratch_path}/{run_dir_name}/out"
run_dir: "{scratch_path}/{run_dir_name}/latest"
sw_root_dir: "{proj_root}/sw"
// Default file to store build_seed value
@ -150,6 +150,7 @@
// Project defaults for VCS
vcs_cov_cfg_file: "{{build_mode}_vcs_cov_cfg_file}"
vcs_unr_cfg_file: "{dv_root}/tools/vcs/unr.cfg"
vcs_fsm_reset_cov_cfg_file: "{dv_root}/tools/vcs/fsm_reset_cov.cfg"
vcs_cov_excl_files: []
// Build-specific coverage cfg files for VCS.


@ -65,6 +65,12 @@ ifneq (${sw_images},)
# If one delimiter is detected, then the full string is considered to be the
# <Bazel label>. If two delimiters are detected, then it must be <Bazel label>
# followed by <index>. The <flag> is considered optional.
#
# After the images are built, we use `bazel cquery ...` to locate the built
# software artifacts so they can be copied to the test bench run directory.
# We only copy device SW images, and do not copy host-side artifacts (like
# opentitantool) that are also dependencies of the Bazel test targets that
# encode the software image targets.
set -e; \
for sw_image in ${sw_images}; do \
if [[ -z $$sw_image ]]; then \
@ -73,7 +79,7 @@ ifneq (${sw_images},)
exit 1; \
fi; \
prebuilt_path=`echo $$sw_image | cut -d: -f 1`; \
bazel_label=`echo $$sw_image | cut -d: -f 1-2`; \
bazel_label="`echo $$sw_image | cut -d: -f 1-2`_$${sw_build_device}"; \
bazel_target=`echo $$sw_image | cut -d: -f 2`; \
index=`echo $$sw_image | cut -d: -f 3`; \
flags=(`echo $$sw_image | cut -d: -f 4- --output-delimiter " "`); \
@ -82,10 +88,7 @@ ifneq (${sw_images},)
echo "SW image \"$$bazel_label\" is prebuilt - copying sources."; \
cp ${proj_root}/$${prebuilt_path} $${run_dir}/`basename $${prebuilt_path}`; \
else \
echo "Building SW image \"$$bazel_label\"."; \
if [[ $$index == "1" ]]; then \
bazel_label+="_sim_dv"; \
fi; \
echo "Building SW image \"$${bazel_label}\"."; \
bazel_opts="${sw_build_opts} --define DISABLE_VERILATOR_BUILD=true"; \
if [[ -z $${BAZEL_PYTHON_WHEELS_REPO} ]]; then \
echo "Building \"$${bazel_label}\" on network connected machine."; \
@ -97,9 +100,24 @@ ifneq (${sw_images},)
fi; \
echo "Building with command: $${bazel_cmd} build $${bazel_opts} $${bazel_label}"; \
$${bazel_cmd} build $${bazel_opts} $${bazel_label}; \
find -L $$($${bazel_cmd} info output_path)/ \
-type f -name "$${bazel_target}*" | \
xargs -I % sh -c 'cp -f % $${run_dir}/$$(basename %)'; \
for dep in $$($${bazel_cmd} cquery "labels(data, $${bazel_label})" \
--ui_event_filters=-info \
--noshow_progress \
--output=starlark); do \
if [[ $$dep != //hw* ]] && [[ $$dep != //util* ]] && [[ $$dep != //sw/host* ]]; then \
for artifact in $$($${bazel_cmd} cquery $${dep} \
--ui_event_filters=-info \
--noshow_progress \
--output=starlark \
--starlark:expr="\"\\n\".join([f.path for f in target.files.to_list()])"); do \
cp -f $${artifact} $${run_dir}/$$(basename $${artifact}); \
if [[ $$artifact == *.scr.vmem ]]; then \
cp -f "$$(echo $${artifact} | cut -d. -f 1).elf" \
$${run_dir}/$$(basename "$${artifact%.*.scr.vmem}.elf"); \
fi; \
done; \
fi; \
done; \
fi; \
done;
endif
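
The colon-delimited sw_images spec described above decomposes the same way the
`cut -d:` calls do. A Python sketch (the example entry and the device suffix are
hypothetical):

  sw_image = "//sw/device/tests:uart_smoketest:1:signed"
  parts = sw_image.split(":")
  prebuilt_path = parts[0]                    # cut -d: -f 1
  bazel_label = ":".join(parts[:2])           # cut -d: -f 1-2
  bazel_target = parts[1]                     # cut -d: -f 2
  index = parts[2] if len(parts) > 2 else ""  # cut -d: -f 3
  flags = parts[3:]                           # cut -d: -f 4-
  bazel_label += "_sim_dv"                    # the new ${sw_build_device} suffix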


@ -20,9 +20,8 @@
all sets back to 0.
- Repeat the above steps a bunch of times.
'''
milestone: V2
stage: V2
tests: ["{name}{intf}_alert_test"]
}
]
}


@ -16,7 +16,7 @@
CSRs are accessible from.
- Shuffle the list of CSRs first to remove the effect of ordering.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_csr_hw_reset"]
}
{
@ -30,7 +30,7 @@
CSRs are accessible from.
- Shuffle the list of CSRs first to remove the effect of ordering.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_csr_rw"]
}
{
@ -46,7 +46,7 @@
CSRs are accessible from.
- Shuffle the list of CSRs first to remove the effect of ordering.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_csr_bit_bash"]
}
{
@ -63,7 +63,7 @@
CSRs are accessible from.
- Shuffle the list of CSRs first to remove the effect of ordering.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_csr_aliasing"]
}
{
@ -77,7 +77,7 @@
- It is mandatory to run this test for all available interfaces the
CSRs are accessible from.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_csr_mem_rw_with_rand_reset"]
}
{
@ -95,7 +95,7 @@
This is only applicable if the block contains regwen and lockable CSRs.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_csr_rw", "{name}{intf}_csr_aliasing"]
}
]
@ -114,4 +114,3 @@
}
]
}


@ -10,9 +10,8 @@
assertion to ensure the read value from the TileLink is expected, and a write assertion
to ensure the write value is updated correctly to DUT according to the register's access.
'''
milestone: V2
stage: V2
tests: ["{name}{intf}_fpv_csr_rw"]
}
]
}


@ -19,9 +19,8 @@
CSR(s).
- Repeat the above steps a bunch of times.
'''
milestone: V2
stage: V2
tests: ["{name}{intf}_intr_test"]
}
]
}


@ -11,7 +11,7 @@
- It is mandatory to run this test from all available interfaces the
memories are accessible from.
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_mem_walk"]
}
{
@ -22,10 +22,9 @@
correctness.
- Also test outstanding access on memories
'''
milestone: V1
stage: V1
tests: ["{name}{intf}_mem_partial_access"]
}
// TODO: add mem access with reset
]
}


@ -16,9 +16,8 @@
- Above sequences will be run with `csr_rw_vseq` to ensure it won't affect CSR
accesses.
'''
milestone: V2S
stage: V2S
tests: ["{name}_passthru_mem_tl_intg_err"]
}
]
}


@ -21,9 +21,8 @@
- Check that err_code/fault_status is updated correctly and preserved until reset.
- Verify any operations that follow fail (as applicable).
'''
milestone: V2S
stage: V2S
tests: ["{name}_sec_cm"]
}
]
}


@ -21,9 +21,8 @@
- Check that err_code/fault_status is updated correctly and preserved until reset.
- Verify any operations that follow fail (as applicable).
'''
milestone: V2S
stage: V2S
tests: ["{name}_sec_cm"]
}
]
}


@ -20,9 +20,8 @@
- Check that err_code/fault_status is updated correctly and preserved until reset.
- Verify any operations that follow fail (as applicable).
'''
milestone: V2S
stage: V2S
tests: ["{name}_sec_cm"]
}
]
}


@ -21,9 +21,8 @@
- Check that err_code/fault_status is updated correctly and preserved until reset.
- Verify any operations that follow fail (as applicable).
'''
milestone: V2S
stage: V2S
tests: ["{name}_sec_cm"]
}
]
}


@ -15,7 +15,7 @@
- Verify the update_error status register field is set to 1.
- Repeat the above steps a bunch of times.
'''
milestone: V2S
stage: V2S
tests: ["{name}_shadow_reg_errors"]
}
@ -31,7 +31,7 @@
- Verify the update_error status register field remains the same value.
- Repeat the above steps a bunch of times.
'''
milestone: V2S
stage: V2S
tests: ["{name}_shadow_reg_errors"]
}
@ -48,7 +48,7 @@
- Read all CSRs to ensure the DUT is properly reset.
- Repeat the above steps a bunch of times.
'''
milestone: V2S
stage: V2S
tests: ["{name}_shadow_reg_errors"]
}
@ -65,7 +65,7 @@
- Read all CSRs to ensure the DUT is properly reset.
- Repeat the above steps a bunch of times.
'''
milestone: V2S
stage: V2S
tests: ["{name}_shadow_reg_errors"]
}
@ -79,7 +79,7 @@
shadowed registers' write/read to be executed without aborting.
- Repeat the above steps a bunch of times.
'''
milestone: V2S
stage: V2S
tests: ["{name}_shadow_reg_errors_with_csr_rw"]
}
]


@ -8,7 +8,7 @@
desc: '''This test runs 3 parallel threads - stress_all, tl_errors and random reset.
After reset is asserted, the test will read and check all valid CSR registers.
'''
milestone: V3
stage: V3
tests: ["{name}_stress_all_with_rand_reset"]
}
]


@ -6,7 +6,7 @@
{
name: tl_d_oob_addr_access
desc: "Access out of bounds address and verify correctness of response / behavior"
milestone: V2
stage: V2
tests: ["{name}_tl_errors"]
}
{
@ -30,7 +30,7 @@
- read a WO (write-only) memory
- write a RO (read-only) memory
- write with `instr_type = True`'''
milestone: V2
stage: V2
tests: ["{name}_tl_errors"]
}
{
@ -38,7 +38,7 @@
desc: '''Drive back-to-back requests without waiting for response to ensure there is one
transaction outstanding within the TL device. Also, verify one outstanding when back-
to-back accesses are made to the same address.'''
milestone: V2
stage: V2
tests: ["{name}_csr_hw_reset",
"{name}_csr_rw",
"{name}_csr_aliasing",
@ -49,7 +49,7 @@
desc: '''Access CSR with one or more bytes of data.
For read, expect to return all word value of the CSR.
For write, enabling bytes should cover all CSR valid fields.'''
milestone: V2
stage: V2
tests: ["{name}_csr_hw_reset",
"{name}_csr_rw",
"{name}_csr_aliasing",
@ -63,7 +63,7 @@
Verify that triggers the correct fatal alert.
- Inject a fault at the onehot check in `u_reg.u_prim_reg_we_check` and verify the
corresponding fatal alert occurs'''
milestone: V2S
stage: V2S
tests: ["{name}_tl_intg_err", "{name}_sec_cm"]
}
]


@ -12,6 +12,7 @@
uvm_test_seq: "{name}_stress_all_vseq"
// 10ms
run_opts: ["+test_timeout_ns=10000000000"]
run_timeout_mins: 180
}
]
}


@ -1,15 +1,11 @@
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
{
tests: [
{
name: "{name}_stress_all"
uvm_test_seq: "{name}_stress_all_vseq"
// 10ms
run_opts: ["+test_timeout_ns=10000000000"]
}
// this contains stress_all and stress_all_with_rand_reset
{
import_cfgs: ["{proj_root}/hw/dv/tools/dvsim/tests/stress_all_test.hjson"]
tests: [
{
name: "{name}_stress_all_with_rand_reset"
uvm_test_seq: "{name}_common_vseq"
@ -17,6 +13,7 @@
// 10ms
"+test_timeout_ns=10000000000",
"+stress_seq={name}_stress_all_vseq"]
run_timeout_mins: 180
}
]
}


@ -169,6 +169,8 @@
"-grade index testfile",
// Use simple ratio of total covered bins over total bins across cps & crs,
"-group ratio",
// Follow LRM naming conventions for array bins.
"-group lrm_bin_name",
// Compute overall coverage for per-instance covergroups individually rather
// than cumulatively.
"-group instcov_for_score",
@ -240,6 +242,9 @@
// Include hierarchies for both code coverage and assertions.
vcs_cov_cfg_file: ""
// Supply cov configuration file for -cm_fsmresetfilter.
vcs_fsm_reset_cov_cfg_file: ""
// Supply the cov exclusion files.
vcs_cov_excl_files: []
@ -279,10 +284,15 @@
"-cm_line contassign",
// Dump toggle coverage on mdas, array of structs and on ports only
"-cm_tgl mda+structarr+portsonly",
// Report condition coverage within tasks, functions and for loops.
"-cm_cond for+tf",
// Ignore initial blocks for coverage
"-cm_report noinitial",
// Filter unreachable/statically constant blocks
"-cm_noconst",
// Filter unreachable/statically constant blocks. seqnoconst does a more
// sophisticated analysis including NBA / assignment with delays.
"-cm_seqnoconst",
// Creates a constfile.txt indicating a list of detected constants.
"-diag noconst"
// Don't count coverage that's coming from zero-time glitches
"-cm_glitch 0",
// Ignore warnings about not applying cm_glitch to path and FSM
@ -290,7 +300,9 @@
// Coverage database output location
"-cm_dir {cov_db_dir}",
// The following option is to improve runtime performance
"-Xkeyopt=rtopt"
"-Xkeyopt=rtopt",
// Exclude FSM transitions that can only happen on reset
"-cm_fsmresetfilter {vcs_fsm_reset_cov_cfg_file}",
]
run_opts: [// Enable the required cov metrics


@ -11,8 +11,10 @@
// Prim_alert/esc pairs are verified in FPV and DV testbenches.
-moduletree prim_alert_sender
-moduletree prim_alert_receiver
-moduletree prim_count
-moduletree prim_esc_sender
-moduletree prim_esc_receiver
-moduletree prim_onehot_check
-moduletree prim_prince // prim_prince is verified in a separate DV environment.
-moduletree prim_lfsr // prim_lfsr is verified in FPV.
// csr_assert_fpv is an auto-generated csr read assertion module. So only assertion coverage is
@ -25,8 +27,10 @@ begin tgl
+tree tb.dut 1
+module prim_alert_sender
+module prim_alert_receiver
+module prim_count
+module prim_esc_sender
+module prim_esc_receiver
+module prim_onehot_check
+module prim_prince
+module prim_lfsr
end


@ -0,0 +1,11 @@
// Copyright lowRISC contributors.
// Licensed under the Apache License, Version 2.0, see LICENSE for details.
// SPDX-License-Identifier: Apache-2.0
// This file is used with -cm_fsmresetfilter to exclude FSM transitions that can only happen on
// reset.
// Format: signal=<reset_signal_name> case=TRUE/FALSE (indicates reset is high/low active)
signal=rst_n case=FALSE
signal=rst_ni case=FALSE
signal=rst_aon_ni case=FALSE


@ -33,9 +33,6 @@ set_statement_scoring
// arrays , vectors, packed union, modport and generate blocks.
set_toggle_scoring -sv_enum enable_mda -sv_struct_with_enum -sv_modport -sv_mda 16 -sv_mda_of_struct -sv_generate -sv_packed_union
// Enables scoring of reset states and transitions for identified FSMs.
set_fsm_reset_scoring
// Enable toggle coverage only on ports.
set_toggle_portsonly


@ -14,8 +14,10 @@ deselect_coverage -betfs -module pins_if
deselect_coverage -betfs -module clk_rst_if
deselect_coverage -betfs -module prim_alert_sender...
deselect_coverage -betfs -module prim_alert_receiver...
deselect_coverage -betfs -module prim_count...
deselect_coverage -betfs -module prim_esc_sender...
deselect_coverage -betfs -module prim_esc_receiver...
deselect_coverage -betfs -module prim_onehot_check...
deselect_coverage -betfs -module prim_prince...
deselect_coverage -betfs -module prim_lfsr...
@ -37,8 +39,10 @@ select_coverage -toggle -module ${DUT_TOP}
select_coverage -toggle -module prim_alert_sender
select_coverage -toggle -module prim_alert_receiver
select_coverage -toggle -module prim_count
select_coverage -toggle -module prim_esc_sender
select_coverage -toggle -module prim_esc_receiver
select_coverage -toggle -module prim_onehot_check
select_coverage -toggle -module prim_prince
select_coverage -toggle -module prim_lfsr


@ -11,10 +11,11 @@ Specification section on shared primitives.
## Parameters
Name | type | Description
-----|------|-------------
InW | int | Input data width
OutW | int | Output data width
Name | type | Description
-------------|------|-------------------------------------
InW | int | Input data width
OutW | int | Output data width
EnProtection | bit | Check FI attack on position counter
## Signal Interfaces
@ -30,6 +31,7 @@ mask_o[OutW] | output | Output bit mask.
ready_i | input | Output data can be drained.
flush_i | input | Send out stored data and clear state.
flush_done_o | output | Indicates flush operation is completed.
err_o | output | When EnProtection is set, the error is reported through this port. This signal is asynchronous to the datapath.
# Theory of Operations


@ -13,7 +13,7 @@
- If the alert is fatal, verify that the alert fires continuously until a reset is
issued.
'''
milestone: V1
stage: V1
tests: ["prim_async_alert",
"prim_async_fatal_alert",
"prim_sync_alert",
@ -27,7 +27,7 @@
- Send an alert test request by driving `alert_test` pin to 1.
- Verify that alert handshake completes and `alert_ack` signal stays low.
'''
milestone: V1
stage: V1
tests: ["prim_async_alert",
"prim_async_fatal_alert",
"prim_sync_alert",
@ -41,7 +41,7 @@
- Send a ping request by driving `ping_req` pin to 1.
- Verify that `ping_ok` signal is set and ping handshake completes.
'''
milestone: V1
stage: V1
tests: ["prim_async_alert",
"prim_async_fatal_alert",
"prim_sync_alert",
@ -62,7 +62,7 @@
drive `init_trigger_i` in prim_alert_receiver.
Check `ping_ok` returns 1.
'''
milestone: V2
stage: V2
tests: ["prim_async_alert",
"prim_async_fatal_alert",
"prim_sync_alert",
@ -84,7 +84,7 @@
- Verify that prim_alert_receiver can identify the integrity error by setting
`integ_fail_o` output to 1.
'''
milestone: V1
stage: V1
tests: ["prim_async_alert",
"prim_async_fatal_alert",
"prim_sync_alert",
@ -101,7 +101,7 @@
and verify we will never miss or drop an alert handshake by expecting `alert_ack_o`
to return 1 after `alert_req` is sent.
'''
milestone: V3
stage: V3
tests: []
}
]


@ -12,7 +12,7 @@
- Wait random length of cycles and verify `esc_en` output is set and `integ_fail`
output remains 0.
'''
milestone: V1
stage: V1
tests: ["prim_esc_test"]
}
@ -26,7 +26,7 @@
- Wait for `ping_ok` to set and `esc_req_out` to set.
- Check the sequence completes without any signal integrity error.
'''
milestone: V1
stage: V1
tests: ["prim_esc_test"]
}
@ -40,7 +40,7 @@
- Release the `esc_n` signal.
- Send a ping request and repeat the above sequence and checks.
'''
milestone: V1
stage: V1
tests: ["prim_esc_test"]
}
@ -56,7 +56,7 @@
to 1.
- Reset the DUT to clear `esc_en` output.
'''
milestone: V1
stage: V1
tests: ["prim_esc_test"]
}
@ -70,7 +70,7 @@
- Verify that prim_esc_receiver detects the counter mismatch and set `esc_en` signal to
1.
'''
milestone: V1
stage: V1
tests: ["prim_esc_test"]
}
@ -83,7 +83,7 @@
- Verify that after reset, the prim_esc_sender and prim_esc_receiver pair functions
correctly by issuing the tests above.
'''
milestone: V1
stage: V1
tests: ["prim_esc_test"]
}


@ -16,7 +16,7 @@ filesets:
- lowrisc:dv:dv_test_status
- lowrisc:dv:common_ifs
files:
- tb/prim_lfsr_tb.sv
- prim_lfsr_tb.sv
file_type: systemVerilogSource
targets:


@ -37,6 +37,10 @@
}
]
// Since none of the tests use the "default" build mode, we need to indicate which build is the
// main build mode. See commit lowrisc/opentitan#51000a8 for more details.
primary_build_mode: prim_lfsr_dw_24
// dw_8 is only used for "smoke" sims, so coverage collection is not needed.
prim_lfsr_dw_8_vcs_cov_cfg_file: ""
prim_lfsr_dw_24_vcs_cov_cfg_file: "-cm_hier {proj_root}/hw/ip/prim/dv/prim_lfsr/data/prim_lfsr_cover.cfg"
@ -69,4 +73,3 @@
}
]
}


@ -23,7 +23,8 @@ module prim_packer_tb #(
input ready_i,
input flush_i,
output logic flush_done_o
output logic flush_done_o,
output logic err_o
);
for (genvar k = 1; k <= 16; k++) begin : gen_prim_packer
@ -40,7 +41,8 @@ module prim_packer_tb #(
.mask_o (mask_o[16-k:0]),
.ready_i,
.flush_i,
.flush_done_o
.flush_done_o,
.err_o
);
end
@ -57,7 +59,8 @@ module prim_packer_tb #(
.mask_o (mask_o),
.ready_i,
.flush_i,
.flush_done_o
.flush_done_o,
.err_o
);
endmodule : prim_packer_tb


@ -7,13 +7,13 @@
# runs simulation and checks expected output)
fail() {
echo >&2 "PRE-DV FAILURE: $@"
echo >&2 "PRE-DV FAILURE: $*"
exit 1
}
set -o pipefail
SCRIPT_DIR="$(dirname "$(readlink -e "$BASH_SOURCE")")"
SCRIPT_DIR="$(dirname "$(readlink -e "${BASH_SOURCE[0]}")")"
UTIL_DIR="$(readlink -e "$SCRIPT_DIR/../../../../../util")" || \
fail "Can't find OpenTitan util dir"
@ -21,11 +21,13 @@ source "$UTIL_DIR/build_consts.sh"
PREDV_DIR=$REPO_TOP/hw/ip/prim/pre_dv/prim_crc32
(cd $REPO_TOP;
(cd $REPO_TOP || exit;
fusesoc --cores-root=. run --target=sim --setup --build \
lowrisc:prim:crc32_sim || fail "HW Sim build failed")
RUN_LOG=`mktemp`
readonly RUN_LOG
# shellcheck disable=SC2064 # The RUN_LOG tempfile path should not change
trap "rm -rf $RUN_LOG" EXIT
timeout 5s \


@ -309,7 +309,7 @@ module prim_fifo_async_sram_adapter #(
end : g_unused_rdata
// read clock domain rdata storage
logic store;
logic store_en;
// Karnaugh map (r_sram_rvalid_i):
// rfifo_ack | 0 | 1 |
@ -318,13 +318,13 @@ module prim_fifo_async_sram_adapter #(
// 1 | 0 | 1 |
//
// stored = s.r.v && XNOR(stored, rptr_inc)
assign store = r_sram_rvalid_i && !(stored ^ rfifo_ack);
assign store_en = r_sram_rvalid_i && !(stored ^ rfifo_ack);
always_ff @(posedge clk_rd_i or negedge rst_rd_ni) begin
if (!rst_rd_ni) begin
stored <= 1'b 0;
rdata_q <= Width'(0);
end else if (store) begin
end else if (store_en) begin
stored <= 1'b 1;
rdata_q <= rdata_d;
end else if (!r_sram_rvalid_i && rfifo_ack) begin
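
Expanding the Karnaugh map comment above into a truth table confirms the
store_en equation; in Python:

  # store_en = r_sram_rvalid_i && !(stored ^ rfifo_ack)
  for rvalid in (0, 1):
      for stored in (0, 1):
          for ack in (0, 1):
              store_en = int(rvalid and not (stored ^ ack))
              print(rvalid, stored, ack, "->", store_en)
  # A read response is stored when nothing is held and no ack is pending, or
  # when the held word is acked in the same cycle and can be overwritten.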


@ -8,9 +8,14 @@
// ICEBOX(#12958): Revise to send out the empty status.
module prim_packer #(
parameter int InW = 32,
parameter int OutW = 32,
parameter int HintByteData = 0 // If 1, The input/output are byte granularity
parameter int unsigned InW = 32,
parameter int unsigned OutW = 32,
// If 1, the input/output are byte granularity
parameter int HintByteData = 0,
// Turn on protection against FI for the pos variable
parameter bit EnProtection = 1'b 0
) (
input clk_i ,
input rst_ni,
@ -26,14 +31,18 @@ module prim_packer #(
input ready_i,
input flush_i, // If 1, send out remnant and clear state
output logic flush_done_o
output logic flush_done_o,
// When EnProtection is set, err_o reports an error condition (position variable
// mismatch)
output logic err_o
);
localparam int Width = InW + OutW; // storage width
localparam int ConcatW = Width + InW; // Input concatenated width
localparam int PtrW = $clog2(ConcatW+1);
localparam int IdxW = prim_util_pkg::vbits(InW);
localparam int OnesCntW = $clog2(InW+1);
localparam int unsigned Width = InW + OutW; // storage width
localparam int unsigned ConcatW = Width + InW; // Input concatenated width
localparam int unsigned PtrW = $clog2(ConcatW+1);
localparam int unsigned IdxW = prim_util_pkg::vbits(InW);
localparam int unsigned OnesCntW = $clog2(InW+1);
logic valid_next, ready_next;
logic [Width-1:0] stored_data, stored_mask;
@ -41,7 +50,7 @@ module prim_packer #(
logic [ConcatW-1:0] shiftl_data, shiftl_mask;
logic [InW-1:0] shiftr_data, shiftr_mask;
logic [PtrW-1:0] pos_q, pos_d; // Current write position
logic [PtrW-1:0] pos_q; // Current write position
logic [IdxW-1:0] lod_idx; // result of Leading One Detector
logic [OnesCntW-1:0] inmask_ones; // Counting Ones for mask_i
@ -60,29 +69,86 @@ module prim_packer #(
end
logic [PtrW-1:0] pos_with_input;
assign pos_with_input = pos_q + PtrW'(inmask_ones);
always_comb begin
pos_d = pos_q;
pos_with_input = pos_q + PtrW'(inmask_ones);
if (EnProtection == 1'b 0) begin : g_pos_nodup
logic [PtrW-1:0] pos_d;
unique case ({ack_in, ack_out})
2'b00: pos_d = pos_q;
2'b01: pos_d = (int'(pos_q) <= OutW) ? '0 : pos_q - PtrW'(unsigned'(OutW));
2'b10: pos_d = pos_with_input;
2'b11: pos_d = (int'(pos_with_input) <= OutW) ? '0 : pos_with_input - PtrW'(unsigned'(OutW));
default: pos_d = pos_q;
endcase
end
always_comb begin
pos_d = pos_q;
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
pos_q <= '0;
end else if (flush_done) begin
pos_q <= '0;
end else begin
pos_q <= pos_d;
unique case ({ack_in, ack_out})
2'b00: pos_d = pos_q;
2'b01: pos_d = (int'(pos_q) <= OutW) ? '0 : pos_q - PtrW'(OutW);
2'b10: pos_d = pos_with_input;
2'b11: pos_d = (int'(pos_with_input) <= OutW) ? '0 : pos_with_input - PtrW'(OutW);
default: pos_d = pos_q;
endcase
end
end
always_ff @(posedge clk_i or negedge rst_ni) begin
if (!rst_ni) begin
pos_q <= '0;
end else if (flush_done) begin
pos_q <= '0;
end else begin
pos_q <= pos_d;
end
end
assign err_o = 1'b 0; // No checker logic
end else begin : g_pos_dupcnt // EnProtection == 1'b 1
// incr_en: Increase the pos by cnt_step. ack_in && !ack_out
// decr_en: Decrease the pos by cnt_step. !ack_in && ack_out
// set_en: Set to a specific value in case of ack_in && ack_out.
// In this case, the value can be increased or decreased based on
// the input size (inmask_ones)
logic cnt_incr_en, cnt_decr_en, cnt_set_en;
logic [PtrW-1:0] cnt_step, cnt_set;
assign cnt_incr_en = ack_in && !ack_out;
assign cnt_decr_en = !ack_in && ack_out;
assign cnt_set_en = ack_in && ack_out;
// counter has underflow protection.
assign cnt_step = (cnt_incr_en) ? PtrW'(inmask_ones) : PtrW'(OutW);
always_comb begin : cnt_set_logic
// default, consuming all data
cnt_set = '0;
if (pos_with_input > PtrW'(OutW)) begin
// pos_q + inmask_ones is bigger than the output width, so data still remains.
cnt_set = pos_with_input - PtrW'(OutW);
end
end : cnt_set_logic
prim_count #(
.Width (PtrW),
.ResetValue ('0 )
) u_pos (
.clk_i,
.rst_ni,
.clr_i (flush_done),
.set_i (cnt_set_en),
.set_cnt_i (cnt_set ),
.incr_en_i (cnt_incr_en),
.decr_en_i (cnt_decr_en),
.step_i (cnt_step ),
.cnt_o (pos_q ), // Current counter state
.cnt_next_o ( ), // Next counter state
.err_o
);
end // g_pos_dupcnt
//---------------------------------------------------------------------------
// Leading one detector for mask_i
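
Both branches above (plain flops in g_pos_nodup, duplicated prim_count in
g_pos_dupcnt) implement the same write-position update. A behavioral Python
model of that update, ignoring bit widths and the counter's underflow
protection:

  def next_pos(pos, ack_in, ack_out, inmask_ones, out_w):
      pos_with_input = pos + inmask_ones
      if ack_in and ack_out:
          return 0 if pos_with_input <= out_w else pos_with_input - out_w
      if ack_in:
          return pos_with_input
      if ack_out:
          return 0 if pos <= out_w else pos - out_w
      return pos

  # e.g. with OutW = 32: accepting 16 new bits while draining 32 of a 40-bit
  # backlog leaves next_pos(40, 1, 1, 16, 32) == 24 bits stored.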


@ -3,6 +3,14 @@
// SPDX-License-Identifier: Apache-2.0
//
// Component handling register CDC
//
// Currently, this module only works correctly when paired with tlul_adapter_reg.
// This is because tlul_adapter_reg does not emit a new transaction to the same
// register if it discovers it is currently busy. Please see the BusySrcReqChk_A
// assertion below for more details.
//
// If in the future this assumption changes, we can modify this module easily to
// support the new behavior.
`include "prim_assert.sv"
@ -91,7 +99,8 @@ module prim_reg_cdc #(
if (!rst_src_ni) begin
src_q <= ResetVal;
txn_bits_q <= '0;
end else if (src_req && !busy) begin
end else if (src_req) begin
// See assertion below
// At the beginning of a software initiated transaction, the following
// values are captured in the src_q/txn_bits_q flops to ensure they cannot
// change for the duration of the synchronization operation.
@ -111,6 +120,15 @@ module prim_reg_cdc #(
end
end
// The current design (tlul_adapter_reg) does not emit a request if the destination it chooses
// (decoded from the address) is busy. So in the current design, src_req_i and busy can never be
// high at the same time.
// While the code above could be written directly as `src_req & !busy`, which makes the intent
// clearer, that ends up causing coverage holes from the tool's perspective, since the condition
// can never be met.
// Thus we add an assertion here to ensure the condition is always satisfied.
`ASSERT(BusySrcReqChk_A, busy |-> !src_req, clk_src_i, !rst_src_ni)
// reserved bits are not used
logic unused_wd;
assign unused_wd = ^src_wd_i;
@ -169,21 +187,4 @@ module prim_reg_cdc #(
// If busy goes high, we must eventually see an ack
`ASSERT(HungHandShake_A, $rose(src_req) |-> strong(##[0:$] src_ack), clk_src_i, !rst_src_ni)
`ifdef INC_ASSERT
logic async_flag;
always_ff @(posedge clk_dst_i or negedge rst_dst_ni or posedge src_update) begin
if (!rst_src_ni) begin
async_flag <= '0;
end else if (src_update) begin
async_flag <= '0;
end else if (dst_update_i) begin
async_flag <= 1'b1;
end
end
// once hardware makes an update request, we must eventually see an update pulse
`ASSERT(ReqTimeout_A, $rose(async_flag) |-> strong(##[0:$] src_update), clk_src_i, !rst_src_ni)
`endif
endmodule // prim_subreg_cdc


@ -152,13 +152,26 @@ module prim_reg_cdc_arb #(
id_q <= SelSwReq;
end else if (dst_req && dst_lat_d) begin
id_q <= SelSwReq;
end else if (dst_update_i && dst_lat_d) begin
end else if (!dst_req && dst_lat_d) begin
id_q <= SelHwReq;
end else if (dst_lat_q) begin
id_q <= SelHwReq;
end
end
// if a destination update is received when the system is idle and there is no
// software side request, hw update must be selected.
`ASSERT(DstUpdateReqCheck_A, ##1 dst_update_i & !dst_req & !busy |=> id_q == SelHwReq,
clk_dst_i, !rst_dst_ni)
// if hw select was chosen, then it must be the case there was a destination update
// indication or there was a difference between the transit register and the
// latest incoming value.
`ASSERT(HwIdSelCheck_A, $rose(id_q == SelHwReq) |-> $past(dst_update_i, 1) ||
$past(dst_lat_q, 1),
clk_dst_i, !rst_dst_ni)
// send out prim_subreg request only when proceeding
// with software request
assign dst_req_o = ~busy & dst_req;
@ -224,6 +237,27 @@ module prim_reg_cdc_arb #(
assign src_ack_o = src_req & (id_q == SelSwReq);
assign src_update_o = src_req & (id_q == SelHwReq);
// once hardware makes an update request, we must eventually see an update pulse
`ASSERT(ReqTimeout_A, $rose(id_q == SelHwReq) |-> s_eventually(src_update_o),
clk_src_i, !rst_src_ni)
`ifdef INC_ASSERT
logic async_flag;
always_ff @(posedge clk_dst_i or negedge rst_dst_ni or posedge src_update_o) begin
if (!rst_dst_ni) begin
async_flag <= '0;
end else if (src_update_o) begin
async_flag <= '0;
end else if (dst_update_i && !dst_req_o && !busy) begin
async_flag <= 1'b1;
end
end
// once hardware makes an update request, we must eventually see an update pulse
`ASSERT(UpdateTimeout_A, $rose(async_flag) |-> s_eventually(src_update_o),
clk_src_i, !rst_src_ni)
`endif
end else begin : gen_passthru
// when there is no possibility of conflicting HW transactions,
// we can assume that dst_qs_i will only ever take on the value
@ -250,4 +284,5 @@ module prim_reg_cdc_arb #(
end
endmodule


@ -67,7 +67,7 @@ module prim_xoshiro256pp #(
(xoshiro_en_i && lockup) ? DefaultSeed :
(xoshiro_en_i) ? next_xoshiro_state : xoshiro_q;
always_ff @(posedge clk_i or negedge rst_ni) begin : reg_state
always_ff @(posedge clk_i or negedge rst_ni) begin : p_reg_state
if (!rst_ni) begin
xoshiro_q <= DefaultSeed;
end else begin


@ -7,6 +7,8 @@
waive -rules TRI_DRIVER -regexp {'inout_io' is driven by a tristate driver} -location {prim_generic_pad_wrapper.sv} \
-comment "This is a bidirectional pad inout."
waive -rules TRI_DRIVER -regexp {'in_raw_o' is driven by a tristate driver} \
-comment "This is a bidirectional pad inout."
waive -rules MULTI_DRIVEN -regexp {.* drivers on 'inout_io' here} -location {prim_generic_pad_wrapper.sv} \
-comment "The pad simulation model has multiple drivers to emulate different IO terminations."
waive -rules SELF_ASSIGN -regexp {LHS signal 'inout_io' encountered on the RHS of a continuous assignment statement} -location {prim_generic_pad_wrapper.sv} \
@ -21,3 +23,5 @@ waive -rules PARAM_NOT_USED -regexp {Parameter 'Variant' not used in module 'pri
-comment "This parameter has been provisioned for later and is currently unused."
waive -rules PARAM_NOT_USED -regexp {Parameter 'ScanRole' not used in module 'prim_generic_pad_wrapper'} -location {prim_generic_pad_wrapper.sv} \
-comment "This parameter has been provisioned for later and is currently unused."
waive -rules INPUT_NOT_READ -msg {Input port 'clk_scan_i' is not read from in module 'prim_generic_pad_wrapper'} \
-comment "This clock is not read in RTL since it will be connected after synthesis during DFT insertion"


@ -13,9 +13,6 @@ module prim_generic_flash #(
parameter int PagesPerBank = 256,// data pages per bank
parameter int WordsPerPage = 256,// words per page
parameter int DataWidth = 32, // bits per word
parameter int ModelOnlyReadLatency = 1, // generic model read latency
parameter int ModelOnlyProgLatency = 50, // generic model program latency
parameter int ModelOnlyEraseLatency = 200, // generic model program latency
parameter int TestModeWidth = 2
) (
input clk_i,
@ -46,9 +43,6 @@ module prim_generic_flash #(
input devmode_i
);
localparam int CfgRegs = 21;
localparam int CfgAddrWidth = $clog2(CfgRegs);
logic unused_devmode;
assign unused_devmode = devmode_i;
@ -71,10 +65,7 @@ module prim_generic_flash #(
.InfoTypesWidth(InfoTypesWidth),
.PagesPerBank(PagesPerBank),
.WordsPerPage(WordsPerPage),
.DataWidth(DataWidth),
.ModelOnlyReadLatency(ModelOnlyReadLatency),
.ModelOnlyProgLatency(ModelOnlyProgLatency),
.ModelOnlyEraseLatency(ModelOnlyEraseLatency)
.DataWidth(DataWidth)
) u_prim_flash_bank (
.clk_i,
.rst_ni,


@ -12,9 +12,6 @@ module prim_generic_flash_bank #(
parameter int PagesPerBank = 256, // data pages per bank
parameter int WordsPerPage = 256, // words per page
parameter int DataWidth = 32, // bits per word
parameter int ModelOnlyReadLatency = 1, // generic model read latency
parameter int ModelOnlyProgLatency = 50, // generic model program latency
parameter int ModelOnlyEraseLatency = 200, // generic model program latency
// Derived parameters
localparam int PageW = $clog2(PagesPerBank),
@ -45,6 +42,23 @@ module prim_generic_flash_bank #(
input flash_power_down_h_i
);
`ifdef SYNTHESIS
localparam int ReadLatency = 1;
localparam int ProgLatency = 50;
localparam int EraseLatency = 200;
`else
int ReadLatency = 1;
int ProgLatency = 50;
int EraseLatency = 200;
initial begin
void'($value$plusargs("flash_read_latency=%0d", ReadLatency));
void'($value$plusargs("flash_program_latency=%0d", ProgLatency));
void'($value$plusargs("flash_erase_latency=%0d", EraseLatency));
end
`endif
// Emulated flash macro values
localparam int BkEraseCycles = 2000;
localparam int InitCycles = 100;
@ -204,11 +218,7 @@ module prim_generic_flash_bank #(
end
// if read cycle is only 1, we can expose the unlatched data directly
if (ModelOnlyReadLatency == 1) begin : gen_fast_rd_data
assign rd_data_o = rd_data_d;
end else begin : gen_rd_data
assign rd_data_o = rd_data_q;
end
assign rd_data_o = ReadLatency == 1 ? rd_data_d : rd_data_q;
// prog_pend_q is necessary to emulate flash behavior that a bit written to 0 cannot be written
// back to 1 without an erase
@ -281,7 +291,7 @@ module prim_generic_flash_bank #(
end else if (pg_erase_req) begin
st_d = StErase;
index_limit_d = WordsPerPage;
time_limit_d = ModelOnlyEraseLatency;
time_limit_d = EraseLatency;
end else if (bk_erase_req) begin
st_d = StErase;
index_limit_d = WordsPerBank;
@ -290,7 +300,7 @@ module prim_generic_flash_bank #(
end
StRead: begin
if (time_cnt < ModelOnlyReadLatency) begin
if (time_cnt < ReadLatency) begin
time_cnt_inc = 1'b1;
end else if (!prog_pend_q) begin
@ -318,7 +328,7 @@ module prim_generic_flash_bank #(
StProg: begin
// if data is already 0, cannot program to 1 without erase
mem_wdata = cmd_q.prog_data & rd_data_q;
if (time_cnt < ModelOnlyProgLatency) begin
if (time_cnt < ProgLatency) begin
mem_req = 1'b1;
mem_wr = 1'b1;
time_cnt_inc = 1'b1;
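
The $value$plusargs hooks above turn the emulated latencies into run-time
options: the format strings define the plusarg names, so a simulation run could
override them with something like the following (a sketch; the values are
arbitrary examples):

  sim_plusargs = [
      "+flash_read_latency=2",
      "+flash_program_latency=25",
      "+flash_erase_latency=100",
  ]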


@ -24,7 +24,7 @@ module prim_generic_flop_en #(
.in_i(en_i),
.out_o(en)
);
end else begin : gen_en
end else begin : gen_en_no_sec_buf
assign en = en_i;
end


@ -24,7 +24,6 @@ module prim_generic_pad_attr
//
// - inversion
// - virtual open drain
// - keeper
// - pullup / pulldown
// - 1 driving strength bit
//
@ -47,7 +46,6 @@ module prim_generic_pad_attr
attr_warl_o = '0;
attr_warl_o.invert = 1'b1;
attr_warl_o.virt_od_en = 1'b1;
attr_warl_o.keep_en = 1'b1;
// Driving strength and pulls are not supported by Verilator
`ifndef VERILATOR
attr_warl_o.pull_en = 1'b1;


@ -38,6 +38,7 @@ module prim_generic_pad_wrapper
attr_i.drive_strength[3:1],
attr_i.od_en,
attr_i.schmitt_en,
attr_i.keep_en,
scanmode_i,
pok_i};
@ -80,14 +81,13 @@ module prim_generic_pad_wrapper
assign (pull0, pull1) inout_io = (oe && !attr_i.drive_strength[0]) ? out : 1'bz;
// pullup / pulldown termination
assign (weak0, weak1) inout_io = attr_i.pull_en ? attr_i.pull_select : 1'bz;
// fake trireg emulation
assign (weak0, weak1) inout_io = (attr_i.keep_en) ? inout_io : 1'bz;
`endif
end else if (PadType == AnalogIn0 || PadType == AnalogIn1) begin : gen_analog
logic unused_ana_sigs;
assign unused_ana_sigs = ^{attr_i, out_i, oe_i, ie_i};
assign inout_io = 1'bz; // explicitly make this tristate to avoid lint errors.
assign in_o = inout_io;
assign in_raw_o = inout_io;


@ -5,14 +5,7 @@ r'''
Class describing lint configuration object
'''
import logging as log
from pathlib import Path
from tabulate import tabulate
from LintCfg import LintCfg
from utils import subst_wildcards, check_bool
from MsgBuckets import MsgBuckets
class CdcCfg(LintCfg):


@ -287,7 +287,7 @@ class Deploy():
def get_timeout_mins(self):
"""Returns the timeout in minutes."""
return 0
return None
def extract_info_from_log(self, log_text: List):
"""Extracts information pertaining to the job from its log.
@ -378,8 +378,8 @@ class CompileSim(Deploy):
if self.sim_cfg.args.build_timeout_mins is not None:
self.build_timeout_mins = self.sim_cfg.args.build_timeout_mins
if self.build_timeout_mins:
log.log(VERBOSE, "Compile timeout for job \"%s\" is %d minutes",
self.name, self.build_timeout_mins)
log.debug("Timeout for job \"%s\" is %d minutes.",
self.name, self.build_timeout_mins)
def pre_launch(self):
# Delete old coverage database directories before building again. We
@ -387,8 +387,11 @@ class CompileSim(Deploy):
rm_path(self.cov_db_dir)
def get_timeout_mins(self):
"""Returns the timeout in minutes."""
return self.build_timeout_mins
"""Returns the timeout in minutes.
Limit build jobs to 60 minutes if the timeout is not set.
"""
return self.build_timeout_mins if self.build_timeout_mins else 60
class CompileOneShot(Deploy):
@ -439,12 +442,15 @@ class CompileOneShot(Deploy):
if self.sim_cfg.args.build_timeout_mins is not None:
self.build_timeout_mins = self.sim_cfg.args.build_timeout_mins
if self.build_timeout_mins:
log.log(VERBOSE, "Compile timeout for job \"%s\" is %d minutes",
self.name, self.build_timeout_mins)
log.debug("Timeout for job \"%s\" is %d minutes.",
self.name, self.build_timeout_mins)
def get_timeout_mins(self):
"""Returns the timeout in minutes."""
return self.build_timeout_mins
"""Returns the timeout in minutes.
Limit build jobs to 60 minutes if the timeout is not set.
"""
return self.build_timeout_mins if self.build_timeout_mins else 60
class RunTest(Deploy):
@ -518,8 +524,8 @@ class RunTest(Deploy):
if self.sim_cfg.args.run_timeout_mins is not None:
self.run_timeout_mins = self.sim_cfg.args.run_timeout_mins
if self.run_timeout_mins:
log.log(VERBOSE, "Run timeout for job \"%s\" is %d minutes",
self.name, self.run_timeout_mins)
log.debug("Timeout for job \"%s\" is %d minutes.",
self.full_name, self.run_timeout_mins)
def pre_launch(self):
self.launcher.renew_odir = True
@ -543,8 +549,11 @@ class RunTest(Deploy):
return RunTest.seeds.pop(0)
def get_timeout_mins(self):
"""Returns the timeout in minutes."""
return self.run_timeout_mins
"""Returns the timeout in minutes.
Limit run jobs to 60 minutes if the timeout is not set.
"""
return self.run_timeout_mins if self.run_timeout_mins else 60
def extract_info_from_log(self, log_text: List):
"""Extracts the time the design was simulated for, from the log."""
@ -605,11 +614,15 @@ class CovMerge(Deploy):
def __init__(self, run_items, sim_cfg):
# Construct the cov_db_dirs right away from the run_items. This is a
# special variable used in the HJson.
# special variable used in the HJson. The coverage associated with
# the primary build mode needs to be first in the list.
self.cov_db_dirs = []
for run in run_items:
if run.cov_db_dir not in self.cov_db_dirs:
self.cov_db_dirs.append(run.cov_db_dir)
if sim_cfg.primary_build_mode == run.build_mode:
self.cov_db_dirs.insert(0, run.cov_db_dir)
else:
self.cov_db_dirs.append(run.cov_db_dir)
# Early lookup the cov_merge_db_dir, which is a mandatory misc
# attribute anyway. We need it to compute additional cov db dirs.
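
A worked example of the ordering above (hypothetical build modes and paths):
visiting runs built as dw_8, dw_24, dw_8 with primary_build_mode == "dw_24"
yields

  cov_db_dirs == ["<scratch>/dw_24/cov.vdb", "<scratch>/dw_8/cov.vdb"]

so the primary build's coverage database is always handed to the merge step
first, and duplicates are dropped.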


@ -42,7 +42,8 @@ class FlowCfg():
def __init__(self, flow_cfg_file, hjson_data, args, mk_config):
# Options set from command line
self.items = args.items
# Uniquify input items, while preserving the order.
self.items = list(dict.fromkeys(args.items))
self.list_items = args.list
self.select_cfgs = args.select_cfgs
self.flow_cfg_file = flow_cfg_file
@ -87,29 +88,25 @@ class FlowCfg():
self.results_title = ""
self.revision = ""
self.results_server_prefix = ""
self.results_server_url_prefix = ""
self.results_server_cmd = ""
self.css_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "style.css")
# `self.results_path` will be updated after `self.rel_path` and
# `self.results_*` below will be updated after `self.rel_path` and
# `self.scratch_base_root` variables are updated.
self.results_path = ""
self.results_dir = ""
self.results_page = ""
self.results_server_path = ""
self.results_server_dir = ""
self.results_server_html = ""
self.results_server_page = ""
self.results_summary_server_html = ""
self.results_summary_server_page = ""
self.results_server_url = ""
self.results_html_name = ""
# Full results in md text
self.results_md = ""
# Selectively sanitized md results to be mailed out or published
self.email_results_md = ""
# Selectively sanitized md results to be published
self.publish_results_md = ""
self.sanitize_email_results = False
self.sanitize_publish_results = False
# Summary results, generated by over-arching cfg
self.email_summary_md = ""
self.results_summary_md = ""
# Merge in the values from the loaded hjson file. If subclasses want to
@ -142,9 +139,19 @@ class FlowCfg():
# _expand and add the code at the start.
self._expand()
# Construct the result_path Path variable after variable expansion.
self.results_path = (Path(self.scratch_base_path) / "reports" /
self.rel_path / self.timestamp)
# Construct the path variables after variable expansion.
self.results_dir = (Path(self.scratch_base_path) / "reports" /
self.rel_path / "latest")
self.results_page = (self.results_dir /
self.results_html_name)
tmp_path = self.results_server + "/" + self.rel_path
self.results_server_path = self.results_server_prefix + tmp_path
tmp_path += "/latest"
self.results_server_dir = self.results_server_prefix + tmp_path
tmp_path += "/" + self.results_html_name
self.results_server_page = self.results_server_prefix + tmp_path
self.results_server_url = "https://" + tmp_path
# Run any final checks
self._post_init()
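
For concreteness, a worked example of the layout constructed above, assuming
hypothetical values scratch_base_path=/scratch, rel_path=hw/ip/uart/dv,
results_server=reports.example.org, results_server_prefix="gs://" and
results_html_name=report.html:

  results_dir         = "/scratch/reports/hw/ip/uart/dv/latest"
  results_page        = "/scratch/reports/hw/ip/uart/dv/latest/report.html"
  results_server_path = "gs://reports.example.org/hw/ip/uart/dv"
  results_server_dir  = "gs://reports.example.org/hw/ip/uart/dv/latest"
  results_server_page = "gs://reports.example.org/hw/ip/uart/dv/latest/report.html"
  results_server_url  = "https://reports.example.org/hw/ip/uart/dv/latest/report.html"
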
@ -406,15 +413,15 @@ class FlowCfg():
result = item._gen_results(results)
log.info("[results]: [%s]:\n%s\n", item.name, result)
log.info("[scratch_path]: [%s] [%s]", item.name, item.scratch_path)
item.write_results_html("results.html", item.results_md)
log.log(VERBOSE, "[report]: [%s] [%s/results.html]", item.name,
item.results_path)
item.write_results_html(self.results_html_name, item.results_md)
log.log(VERBOSE, "[report]: [%s] [%s/report.html]", item.name,
item.results_dir)
self.errors_seen |= item.errors_seen
if self.is_primary_cfg:
self.gen_results_summary()
self.write_results_html("summary.html", self.results_summary_md)
self.gen_email_html_summary()
self.write_results_html(self.results_html_name,
self.results_summary_md)
def gen_results_summary(self):
'''Public facing API to generate summary results for each IP/cfg file
@ -422,47 +429,29 @@ class FlowCfg():
return
def write_results_html(self, filename, text_md):
"""Converts md text to HTML and writes to file in results_path area."""
"""Converts md text to HTML and writes to results_dir area."""
# Prepare reports directory, keeping 90 day history.
if not self.results_path.is_dir():
clean_odirs(odir=self.results_path, max_odirs=89)
mk_path(self.results_path)
clean_odirs(odir=self.results_dir, max_odirs=89)
mk_path(self.results_dir)
# Write results to the report area.
with open(self.results_path / filename, "w") as f:
with open(self.results_dir / filename, "w") as f:
f.write(
md_results_to_html(self.results_title, self.css_file, text_md))
def _get_results_page_link(self, link_text):
if not self.args.publish:
return link_text
results_page_url = self.results_server_page.replace(
self.results_server_prefix, self.results_server_url_prefix)
return "[%s](%s)" % (link_text, results_page_url)
def _get_results_page_link(self, relative_to, link_text=''):
"""Create a relative markdown link to the results page."""
def gen_email_html_summary(self):
if self.is_primary_cfg:
# user can customize email content by using email_summary_md,
# otherwise default to send out results_summary_md
gen_results = self.email_summary_md or self.results_summary_md
else:
gen_results = self.email_results_md or self.results_md
fname = f"{self.scratch_root}/{self.name}-{self.flow}"
if self.tool:
fname = f"{fname}-{self.tool}"
fname = f"{fname}-report.html"
log.info("[results]: [email]: [%s]", fname)
with open(fname, "w") as f:
f.write(
md_results_to_html(self.results_title, self.css_file,
gen_results))
link_text = self.name.upper() if not link_text else link_text
relative_link = os.path.relpath(self.results_page,
relative_to)
return "[%s](%s)" % (link_text, relative_link)
def _publish_results(self):
'''Publish results to the opentitan web server.
Results are uploaded to {results_server_path}/latest/results.
Results are uploaded to {results_server_page}.
If the 'latest' directory exists, then it is renamed to its 'timestamp'
directory. If the list of directories in this area is > 14, then the
oldest entry is removed. Links to the last 7 regression results are
@ -473,16 +462,12 @@ class FlowCfg():
"results server")
return
# Construct the paths
results_page_url = self.results_server_page.replace(
self.results_server_prefix, self.results_server_url_prefix)
# Timeformat for moving the dir
tf = "%Y.%m.%d_%H.%M.%S"
# Extract the timestamp of the existing self.results_server_page
cmd = self.results_server_cmd + " ls -L " + self.results_server_page + \
" | grep \'Creation time:\'"
cmd = (self.results_server_cmd + " ls -L " +
self.results_server_page + " | grep \'Creation time:\'")
log.log(VERBOSE, cmd)
cmd_output = subprocess.run(cmd,
@ -561,14 +546,11 @@ class FlowCfg():
rm_cmd = ""
history_txt = "\n## Past Results\n"
history_txt += "- [Latest](" + results_page_url + ")\n"
history_txt += "- [Latest](../latest/" + self.results_html_name + ")\n"
if len(rdirs) > 0:
for i in range(len(rdirs)):
if i < 7:
rdir_url = self.results_server_path + '/' + rdirs[
i] + "/" + self.results_server_html
rdir_url = rdir_url.replace(self.results_server_prefix,
self.results_server_url_prefix)
rdir_url = '../' + rdirs[i] + "/" + self.results_html_name
history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url)
elif i > 14:
rm_cmd += self.results_server_path + '/' + rdirs[i] + " "
@ -583,10 +565,11 @@ class FlowCfg():
# Publish the results page.
# First, write the results html file to the scratch area.
self.write_results_html("publish.html", publish_results_md)
results_html_file = os.path.join(self.results_path, "publish.html")
results_html_file = self.results_dir / "publish.html"
log.info("Publishing results to %s", results_page_url)
cmd = (self.results_server_cmd + " cp " + results_html_file + " " +
log.info("Publishing results to %s", self.results_server_url)
cmd = (self.results_server_cmd + " cp " +
str(results_html_file) + " " +
self.results_server_page)
log.log(VERBOSE, cmd)
try:
@ -612,14 +595,11 @@ class FlowCfg():
'''Public facing API for publishing md format results to the opentitan
web server.
'''
results_page_url = self.results_summary_server_page.replace(
self.results_server_prefix, self.results_server_url_prefix)
# Publish the results page.
log.info("Publishing results summary to %s", results_page_url)
result_summary_path = os.path.join(self.results_path, "summary.html")
cmd = (self.results_server_cmd + " cp " + result_summary_path + " " +
self.results_summary_server_page)
log.info("Publishing results summary to %s", self.results_server_url)
cmd = (self.results_server_cmd + " cp " +
str(self.results_page) + " " +
self.results_server_page)
log.log(VERBOSE, cmd)
try:
cmd_output = subprocess.run(args=cmd,


@ -9,7 +9,7 @@ import hjson
from tabulate import tabulate
from OneShotCfg import OneShotCfg
from utils import VERBOSE, subst_wildcards
from utils import subst_wildcards
class FormalCfg(OneShotCfg):
@ -131,7 +131,6 @@ class FormalCfg(OneShotCfg):
# Gathers the aggregated results from all sub configs
# The results_summary will only contain the passing rate and
# percentages of the stimuli, coi, and proven coverage
# The email_summary will contain all the information from results_md
results_str = "## " + self.results_title + " (Summary)\n\n"
results_str += "### " + self.timestamp_long + "\n"
if self.revision:
@ -155,40 +154,6 @@ class FormalCfg(OneShotCfg):
log.info("[result summary]: %s", self.results_summary_md)
# Generate email results summary
colalign = ("left", ) + ("center", ) * (len(self.header) - 1)
email_table = [self.header]
error_message = ""
for cfg in self.cfgs:
email_result = cfg.result.get("summary")
if email_result is not None:
email_table.append([
cfg.name,
str(email_result["errors"]) + " E ",
str(email_result["warnings"]) + " W ",
str(email_result["proven"]) + " G ",
str(email_result["cex"]) + " E ",
str(email_result["undetermined"]) + " W ",
str(email_result["covered"]) + " G ",
str(email_result["unreachable"]) + " E ",
email_result["pass_rate"],
email_result["cov_rate"]
])
messages = cfg.result.get("messages")
if messages is not None:
# TODO: temp disable printing out warnings in results_summary
# Will clean up formal warnings first, then display warnings
error = self.parse_dict_to_str(messages, ["warnings"])
if error:
error_message += "\n#### " + cfg.name + "\n"
error_message += error
if len(email_table) > 1:
self.email_summary_md = results_str + tabulate(
email_table, headers="firstrow", tablefmt="pipe", colalign=colalign)
self.email_summary_md += error_message
return self.results_summary_md
def _gen_results(self, results):

View file

@ -345,7 +345,7 @@ class Launcher:
status = "F"
err_msg = ErrorMessage(line_number=None,
message=f"{e}",
context=[])
context=[f"{e}"])
self.status = status
if self.status != "P":

View file

@ -76,10 +76,8 @@ class LintCfg(OneShotCfg):
keys = self.totals.get_keys(self.report_severities)
for cfg in self.cfgs:
results_page = f'{cfg.results_server_dir}/results.html'
results_page_url = results_page.replace(
cfg.results_server_prefix, cfg.results_server_url_prefix)
name_with_link = f'[{cfg.name.upper()}]({results_page_url})'
name_with_link = cfg._get_results_page_link(
self.results_dir)
row = [name_with_link]
row += cfg.result_summary.get_counts_md(keys)
@ -164,18 +162,15 @@ class LintCfg(OneShotCfg):
table, headers='firstrow', tablefmt='pipe',
colalign=colalign) + '\n'
# The email and published reports will default to self.results_md
# if they are empty. In case they need to be sanitized, override
# them and do not append detailed messages.
if self.sanitize_email_results:
self.email_results_md = self.results_md
# The published report will default to self.results_md if it is
# empty. In case it needs to be sanitized, override it and do
# not append detailed messages.
if self.sanitize_publish_results:
self.publish_results_md = self.results_md
# Locally generated result always contains all details.
self.results_md += fail_msgs
else:
self.results_md = f'{results_str}\nNo results to display.\n'
self.email_results_md = self.results_md
self.publish_results_md = self.results_md
return self.results_md

View file

@ -3,13 +3,11 @@
# SPDX-License-Identifier: Apache-2.0
import datetime
import logging as log
import os
import shlex
import subprocess
from Launcher import ErrorMessage, Launcher, LauncherError
from utils import VERBOSE
class LocalLauncher(Launcher):
@ -95,7 +93,7 @@ class LocalLauncher(Launcher):
'K',
ErrorMessage(line_number=None,
message=timeout_message,
context=[]))
context=[timeout_message]))
return 'K'
return 'D'

View file

@ -266,7 +266,7 @@ class LsfLauncher(Launcher):
line_number=None,
message="ERROR: Failed to open {}\n{}.".format(
self.bsub_out, e),
context=[]))
context=[e]))
return "F"
# Now that the job has completed, we need to determine its status.
@ -303,7 +303,7 @@ class LsfLauncher(Launcher):
if self.bsub_out_err_msg:
err_msg = ErrorMessage(line_number=None,
message=self.bsub_out_err_msg,
context=[])
context=[self.bsub_out_err_msg])
self._post_finish(status, err_msg)
return status
@ -402,4 +402,4 @@ class LsfLauncher(Launcher):
job._post_finish(
'F', ErrorMessage(line_number=None,
message=err_msg,
context=[]))
context=[err_msg]))
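The recurring edit across these launcher hunks is the same fix: an `ErrorMessage` whose `context` was an empty list now carries its own message, so downstream reporting always has at least one context line to print. A minimal sketch of the pattern (the dataclass shape is inferred from the call sites and is an assumption):

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class ErrorMessage:
    line_number: Optional[int]
    message: str
    context: List[str]

# Before this change, context=[] produced reports with no context at all.
timeout_message = "Job timed out"
err_msg = ErrorMessage(line_number=None,
                       message=timeout_message,
                       context=[timeout_message])  # message doubles as context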

View file

@ -14,6 +14,7 @@ class Modes():
Abstraction for specifying a collection of options called 'modes'. This is
the base class, which is extended for run_modes, build_modes, tests and regressions.
"""
def self_str(self):
'''
This is used to construct the string representation of the entire class object.
@ -52,7 +53,8 @@ class Modes():
for key in keys:
if key not in attrs:
log.error("Key %s in %s is invalid", key, mdict)
log.error(f"Key {key} in {mdict} is invalid. Supported "
f"attributes in {self.mname} are {attrs}")
sys.exit(1)
setattr(self, key, mdict[key])
@ -151,6 +153,7 @@ class Modes():
Process dependencies.
Return a list of modes objects.
'''
def merge_sub_modes(mode, parent, objs):
# Check if there are modes available to merge
sub_modes = mode.get_sub_modes()
@ -266,7 +269,7 @@ class BuildModes(Modes):
self.post_build_cmds = []
self.en_build_modes = []
self.build_opts = []
self.build_timeout_mins = 60
self.build_timeout_mins = None
self.pre_run_cmds = []
self.post_run_cmds = []
self.run_opts = []
@ -302,7 +305,7 @@ class RunModes(Modes):
self.uvm_test = ""
self.uvm_test_seq = ""
self.build_mode = ""
self.run_timeout_mins = 60
self.run_timeout_mins = None
self.sw_images = []
self.sw_build_device = ""
self.sw_build_opts = []
@ -332,7 +335,8 @@ class Tests(RunModes):
"build_mode": "",
"sw_images": [],
"sw_build_device": "",
"sw_build_opts": []
"sw_build_opts": [],
"run_timeout_mins": None
}
def __init__(self, tdict):
@ -348,6 +352,7 @@ class Tests(RunModes):
Process enabled run modes and the set build mode.
Return a list of test objects.
'''
def get_pruned_en_run_modes(test_en_run_modes, global_en_run_modes):
pruned_en_run_modes = []
for test_en_run_mode in test_en_run_modes:
@ -434,13 +439,13 @@ class Tests(RunModes):
global_build_opts, global_pre_run_cmds,
global_post_run_cmds, global_run_opts,
global_sw_images, global_sw_build_opts):
processed_build_modes = []
processed_build_modes = set()
for test in tests:
if test.build_mode.name not in processed_build_modes:
test.build_mode.pre_build_cmds.extend(global_pre_build_cmds)
test.build_mode.post_build_cmds.extend(global_post_build_cmds)
test.build_mode.build_opts.extend(global_build_opts)
processed_build_modes.append(test.build_mode.name)
processed_build_modes.add(test.build_mode.name)
test.pre_run_cmds.extend(global_pre_run_cmds)
test.post_run_cmds.extend(global_post_run_cmds)
test.run_opts.extend(global_run_opts)
@ -597,7 +602,7 @@ class Regressions(Modes):
regression_obj.test_names = Tests.item_names
else:
tests_objs = []
tests_objs = set()
regression_obj.test_names = regression_obj.tests
for test in regression_obj.tests:
test_obj = Modes.find_mode(test, sim_cfg.tests)
@ -606,8 +611,8 @@ class Regressions(Modes):
"Test \"%s\" added to regression \"%s\" not found!",
test, regression_obj.name)
continue
tests_objs.append(test_obj)
regression_obj.tests = tests_objs
tests_objs.add(test_obj)
regression_obj.tests = list(tests_objs)
# Return the list of tests
return regressions_objs
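Switching `tests_objs` from a list to a set makes adding the same test twice (for example via repeated `-i` switches, per the changelog) a harmless no-op instead of a duplicated regression entry. A toy illustration of the difference:

# Old behaviour: a list accumulates duplicates.
tests = []
for name in ["foo_smoke", "foo_smoke"]:
    tests.append(name)
assert tests == ["foo_smoke", "foo_smoke"]

# New behaviour: a set collapses them.
tests = set()
for name in ["foo_smoke", "foo_smoke"]:
    tests.add(name)
assert list(tests) == ["foo_smoke"]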

View file

@ -205,6 +205,14 @@ class SimCfg(FlowCfg):
if not hasattr(self, "build_mode"):
self.build_mode = 'default'
# Set the primary build mode. The coverage associated with this build
# is the main coverage. Some tools need this information. This is
# of significance only when there are multiple builds. If there is
# only one build, and it's not the primary_build_mode, then we
# update the primary_build_mode to match what is built.
if not hasattr(self, "primary_build_mode"):
self.primary_build_mode = self.build_mode
# Create objects from raw dicts - build_modes, sim_modes, run_modes,
# tests and regressions, only if not a primary cfg obj
self._create_objects()
@ -302,8 +310,8 @@ class SimCfg(FlowCfg):
if self.testplan != "":
self.testplan = Testplan(self.testplan,
repo_top=Path(self.proj_root))
# Extract tests in each milestone and add them as regression target.
self.regressions.extend(self.testplan.get_milestone_regressions())
# Extract tests in each stage and add them as regression target.
self.regressions.extend(self.testplan.get_stage_regressions())
else:
# Create a dummy testplan with no entries.
self.testplan = Testplan(None, name=self.name)
@ -329,6 +337,7 @@ class SimCfg(FlowCfg):
style patterns. This method finds regressions and tests that match
these patterns.
'''
def _match_items(items: list, patterns: list):
hits = []
matched = set()
@ -453,6 +462,11 @@ class SimCfg(FlowCfg):
is_unique = True
for build in self.builds:
if build.is_equivalent_job(new_build):
# Discard `new_build` since it is equivalent to build. If
# `new_build` is the same as `primary_build_mode`, update
# `primary_build_mode` to match `build`.
if new_build.name == self.primary_build_mode:
self.primary_build_mode = build.name
new_build = build
is_unique = False
break
@ -461,6 +475,18 @@ class SimCfg(FlowCfg):
self.builds.append(new_build)
build_map[build_mode_obj] = new_build
# If there is only one build, set primary_build_mode to it.
if len(self.builds) == 1:
self.primary_build_mode = self.builds[0].name
# Check self.primary_build_mode is set correctly.
build_mode_names = set(b.name for b in self.builds)
if self.primary_build_mode not in build_mode_names:
log.error(f"\"primary_build_mode: {self.primary_build_mode}\" "
f"in {self.name} cfg is invalid. Please pick from "
f"{build_mode_names}.")
sys.exit(1)
# Update all tests to use the updated (uniquified) build modes.
for test in self.run_list:
if test.build_mode.name != build_map[test.build_mode].name:
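Taken together, the hunks above resolve `primary_build_mode` in three steps: follow an equivalent build when a duplicate is discarded, collapse to the sole build when only one remains, and finally require that the name matches a real build. A condensed sketch of that flow (simplified to plain strings; not the actual SimCfg code):

import logging as log
import sys

def resolve_primary(primary, build_names):
    # Only one build: it is the primary by definition.
    if len(build_names) == 1:
        return build_names[0]
    # Otherwise the configured name must refer to an actual build.
    if primary not in set(build_names):
        log.error(f'"primary_build_mode: {primary}" is invalid. '
                  f"Please pick from {set(build_names)}.")
        sys.exit(1)
    return primary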
@ -540,6 +566,7 @@ class SimCfg(FlowCfg):
is enabled, then the summary coverage report is also generated. The final
result is in markdown format.
'''
def indent_by(level):
return " " * (4 * level)
@ -665,10 +692,6 @@ class SimCfg(FlowCfg):
else:
self.results_summary["Coverage"] = "--"
# append link of detail result to block name
self.results_summary["Name"] = self._get_results_page_link(
self.results_summary["Name"])
if results.buckets:
self.errors_seen = True
results_str += "\n".join(create_bucket_report(results.buckets))
@ -694,14 +717,20 @@ class SimCfg(FlowCfg):
table = []
header = []
for cfg in self.cfgs:
row = cfg.results_summary.values()
row = cfg.results_summary
if row:
# convert name entry to relative link
row = cfg.results_summary
row["Name"] = cfg._get_results_page_link(
self.results_dir,
row["Name"])
# If header is set, ensure it's the same for all cfgs.
if header:
assert header == cfg.results_summary.keys()
else:
header = cfg.results_summary.keys()
table.append(row)
table.append(row.values())
if table:
assert header
@ -725,7 +754,7 @@ class SimCfg(FlowCfg):
if self.cov_report_deploy is not None:
results_server_dir_url = self.results_server_dir.replace(
self.results_server_prefix, self.results_server_url_prefix)
self.results_server_prefix, "https://")
log.info("Publishing coverage results to %s",
results_server_dir_url)

View file

@ -391,11 +391,9 @@ class SynCfg(OneShotCfg):
msgs = self.result['messages'].get(key)
fail_msgs += print_msg_list("#### " + hdr, msgs, self.max_msg_count)
# the email and published reports will default to self.results_md if they are
# empty. in case they need to be sanitized, override them and do not append
# detailed messages.
if self.sanitize_email_results:
self.email_results_md = results_str
# The published report will default to self.results_md if it is
# empty. In case it needs to be sanitized, override it and do
# not append detailed messages.
if self.sanitize_publish_results:
self.publish_results_md = results_str

View file

@ -130,14 +130,14 @@ class Testpoint(Element):
It captures following information:
- name of the planned test
- a brief description indicating intent, stimulus and checking procedure
- the targeted milestone
- the targeted stage
- the list of actual developed tests that verify it
"""
kind = "testpoint"
fields = Element.fields + ["milestone", "tests"]
fields = Element.fields + ["stage", "tests"]
# Verification milestones.
milestones = ("N.A.", "V1", "V2", "V2S", "V3")
# Verification stages.
stages = ("N.A.", "V1", "V2", "V2S", "V3")
def __init__(self, raw_dict):
super().__init__(raw_dict)
@ -153,15 +153,15 @@ class Testpoint(Element):
self.not_mapped = True
def __str__(self):
return super().__str__() + (f" Milestone: {self.milestone}\n"
return super().__str__() + (f" Stage: {self.stage}\n"
f" Tests: {self.tests}\n")
def _validate(self):
super()._validate()
if self.milestone not in Testpoint.milestones:
raise ValueError(f"Testpoint milestone {self.milestone} is "
if self.stage not in Testpoint.stages:
raise ValueError(f"Testpoint stage {self.stage} is "
f"invalid:\n{self}\nLegal values: "
f"Testpoint.milestones")
f"Testpoint.stages")
# "tests" key must be list.
if not isinstance(self.tests, list):
@ -346,10 +346,10 @@ class Testplan:
print("Error: the testplan 'name' is not set!")
sys.exit(1)
# Represents current progress towards each milestone. Milestone = N.A.
# Represents current progress towards each stage. Stage = N.A.
# is used to indicate the unmapped tests.
self.progress = {}
for key in Testpoint.milestones:
for key in Testpoint.stages:
self.progress[key] = {
"total": 0,
"written": 0,
@ -463,17 +463,17 @@ class Testplan:
self._sort()
def _sort(self):
"""Sort testpoints by milestone and covergroups by name."""
self.testpoints.sort(key=lambda x: x.milestone)
"""Sort testpoints by stage and covergroups by name."""
self.testpoints.sort(key=lambda x: x.stage)
self.covergroups.sort(key=lambda x: x.name)
def get_milestone_regressions(self):
def get_stage_regressions(self):
regressions = defaultdict(set)
for tp in self.testpoints:
if tp.not_mapped:
continue
if tp.milestone in tp.milestones[1:]:
regressions[tp.milestone].update({t for t in tp.tests if t})
if tp.stage in tp.stages[1:]:
regressions[tp.stage].update({t for t in tp.tests if t})
# Build regressions dict into a hjson like data structure
return [{
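For reference, the renamed method groups each mapped test under its testpoint's stage and emits one regression per stage. A self-contained approximation (plain dicts stand in for `Testpoint` objects; the output shape mirrors the hjson-like structure mentioned in the comment):

from collections import defaultdict

STAGES = ("N.A.", "V1", "V2", "V2S", "V3")

def get_stage_regressions(testpoints):
    regressions = defaultdict(set)
    for tp in testpoints:
        if tp["stage"] in STAGES[1:]:  # skip unmapped / "N.A." entries
            regressions[tp["stage"]].update(t for t in tp["tests"] if t)
    return [{"name": stage, "tests": list(tests)}
            for stage, tests in regressions.items()]

print(get_stage_regressions([
    {"stage": "V1", "tests": ["foo_smoke"]},
    {"stage": "V2", "tests": ["foo_feature1", ""]},
]))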
@ -506,7 +506,7 @@ class Testplan:
if self.testpoints:
lines = [formatter("\n### Testpoints\n")]
header = ["Milestone", "Name", "Tests", "Description"]
header = ["Stage", "Name", "Tests", "Description"]
colalign = ("center", "center", "left", "left")
table = []
for tp in self.testpoints:
@ -517,7 +517,7 @@ class Testplan:
# Markdown and HTML mode by interspersing with '<br>' tags.
tests = "<br>\n".join(tp.tests)
table.append([tp.milestone, tp.name, tests, desc])
table.append([tp.stage, tp.name, tests, desc])
lines += [
tabulate(table,
headers=header,
@ -558,11 +558,11 @@ class Testplan:
"""Computes the testplan progress and the sim footprint.
totals is a list of Testpoint items that represent the total number
of tests passing for each milestone. The sim footprint is simply
of tests passing for each stage. The sim footprint is simply
the sum total of all tests run in the simulation, counted for each
milestone and also the grand total.
stage and also the grand total.
"""
ms = testpoint.milestone
ms = testpoint.stage
for tr in testpoint.test_results:
if not tr:
continue
@ -578,7 +578,7 @@ class Testplan:
self.progress[ms]["passing"] += 1
self.progress[ms]["written"] += 1
# Compute the milestone total & the grand total.
# Compute the stage total & the grand total.
totals[ms].test_results[0].passing += tr.passing
totals[ms].test_results[0].total += tr.total
if ms != "N.A.":
@ -586,13 +586,13 @@ class Testplan:
totals["N.A."].test_results[0].total += tr.total
totals = {}
# Create testpoints to represent the total for each milestone & the
# Create testpoints to represent the total for each stage & the
# grand total.
for ms in Testpoint.milestones:
for ms in Testpoint.stages:
arg = {
"name": "N.A.",
"desc": f"Total {ms} tests",
"milestone": ms,
"stage": ms,
"tests": [],
}
totals[ms] = Testpoint(arg)
@ -603,7 +603,7 @@ class Testplan:
arg = {
"name": "Unmapped tests",
"desc": "Unmapped tests",
"milestone": "N.A.",
"stage": "N.A.",
"tests": [],
}
unmapped = Testpoint(arg)
@ -617,8 +617,8 @@ class Testplan:
unmapped.test_results = [tr for tr in test_results if not tr.mapped]
_process_testpoint(unmapped, totals)
# Add milestone totals back into 'testpoints' and sort.
for ms in Testpoint.milestones[1:]:
# Add stage totals back into 'testpoints' and sort.
for ms in Testpoint.stages[1:]:
self.testpoints.append(totals[ms])
self._sort()
@ -627,11 +627,11 @@ class Testplan:
self.testpoints.append(unmapped)
self.testpoints.append(totals["N.A."])
# Compute the progress rate for each milestone.
for ms in Testpoint.milestones:
# Compute the progress rate for each stage.
for ms in Testpoint.stages:
stat = self.progress[ms]
# Remove milestones that are not targeted.
# Remove stages that are not targeted.
if stat["total"] == 0:
self.progress.pop(ms)
continue
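The bookkeeping above reduces to: accumulate per-stage totals while walking the test results, then drop stages with nothing targeted. A compact sketch of that pruning step (the percentage field is an assumed illustration, not the exact formula used downstream):

def finalize_progress(progress):
    done = {}
    for stage, stat in progress.items():
        if stat["total"] == 0:
            continue  # stage not targeted; remove it from the report
        rate = 100.0 * stat["passing"] / stat["total"]
        done[stage] = dict(stat, progress=f"{rate:.2f} %")
    return done

print(finalize_progress({
    "V1": {"total": 10, "written": 8, "passing": 7},
    "V3": {"total": 0, "written": 0, "passing": 0},
}))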
@ -674,13 +674,13 @@ class Testplan:
assert self.test_results_mapped, "Have you invoked map_test_results()?"
header = [
"Milestone", "Name", "Tests", "Max Job Runtime", "Simulated Time",
"Stage", "Name", "Tests", "Max Job Runtime", "Simulated Time",
"Passing", "Total", "Pass Rate"
]
colalign = ('center', ) * 2 + ('left', ) + ('center', ) * 5
table = []
for tp in self.testpoints:
milestone = "" if tp.milestone == "N.A." else tp.milestone
stage = "" if tp.stage == "N.A." else tp.stage
tp_name = "" if tp.name == "N.A." else tp.name
for tr in tp.test_results:
if tr.total == 0 and not map_full_testplan:
@ -693,10 +693,10 @@ class Testplan:
tr.simulated_time)
table.append([
milestone, tp_name, tr.name, job_runtime, simulated_time,
stage, tp_name, tr.name, job_runtime, simulated_time,
tr.passing, tr.total, pass_rate
])
milestone = ""
stage = ""
tp_name = ""
text = "\n### Test Results\n"
@ -763,7 +763,7 @@ class Testplan:
# return the results summary as a dict.
total = self.testpoints[-1]
assert total.name == "N.A."
assert total.milestone == "N.A."
assert total.stage == "N.A."
tr = total.test_results[0]

View file

@ -28,10 +28,10 @@ The following attributes are used to define each testpoint, at minimum:
The recommended naming convention to follow is `<main_feature>_<sub_feature>_<sub_sub_feature_or_type>_...`.
There is no need to suffix (or prefix) the testpoint name with "test".
* **milestone: targeted verification milestone**
* **stage: targeted verification stage**
This is either `V1`, `V2`, `V2S` or `V3`.
It helps us track whether all of the testing requirements of a milestone have been achieved.
It helps us track whether all of the testing requirements of a verification stage have been achieved.
* **desc: testpoint description**
@ -45,7 +45,7 @@ The following attributes are used to define each testpoint, at minimum:
The testplan is written in the initial work stage of the verification [life-cycle]({{< relref "doc/project/development_stages#hardware-verification-stages" >}}).
Later, when the DV engineer writes the tests, they may not map one-to-one to a testpoint - a written test may satisfactorily address multiple testpoints, or a testpoint may need to be split into multiple smaller tests.
To cater to these needs, we provide the ability to set a list of written tests for each testpoint.
It is used to not only indicate the current progress so far into each milestone, but also map the simulation results to the testpoints to generate the final report table.
It is used not only to indicate the current progress into each verification stage, but also to map the simulation results to the testpoints to generate the final report table.
This list is initially empty - it is gradually updated as tests are written.
Setting this list to `["N/A"]` will prevent this testpoint entry from being mapped to the simulation results.
The testpoint will, however, still show up in the generated testplan table.
@ -63,8 +63,8 @@ The following attributes are used to define each testpoint, at minimum:
// Run this testpoint in gate level and with poweraware.
tags: ["gls", "pa"]
// Run this testpoint with mask ROM (will use test ROM by default).
tags: ["mask_rom"]
// Run this testpoint with ROM (will use test ROM by default).
tags: ["rom"]
// Run this testpoint as a post-Si test vector on the tester.
tags: ["vector"]
@ -80,7 +80,7 @@ Here's an example:
testpoints: [
{
name: feature1
milestone: V1
stage: V1
desc: '''**Goal**: High level goal of this test.
**Stimulus**: Describe the stimulus procedure.
@ -90,7 +90,7 @@ Here's an example:
}
{
name: feature2
milestone: V2
stage: V2
desc: '''**Goal**: High level goal of this test.
**Stimulus**: Describe the stimulus procedure.
@ -296,4 +296,4 @@ from dvsim.Testplan import Testplan
* Allow DUT and its imported testplans to have the same testpoint name as long as they are in separate files.
* The list of written tests are appended from both files.
* The descriptions are merged - it's up to the user to ensure that it is still meaningful after the merge.
* Conflicting milestones are flagged as an error.
* Conflicting verification stages are flagged as an error.

View file

@ -7,7 +7,7 @@
name: csr
desc: '''Standard CSR suite of tests run from all valid interfaces to prove SW
accessibility.'''
milestone: V1
stage: V1
// {name} and {intf} are wildcards in tests
// importer needs to provide substitutions for these as string or a list
// if list, then substitution occurs on all values in the list
@ -19,4 +19,3 @@
}
]
}

View file

@ -19,15 +19,15 @@
the description on multiple lines like this (with 3 single-inverted commas).
Note that the subsequent lines are indented right below where the inverted
commas start.'''
// milestone for which this test is targeted for - V1, V2 or V3
milestone: V1
// verification stage for which this test is targeted - V1, V2 or V3
stage: V1
// list of actual written tests that map to this entry
tests: ["{name}_smoke"]
}
{
name: feature1
desc: "A single line description with single double-inverted commas."
milestone: V2
stage: V2
// testplan entry with no tests added
tests: ["{name}_{intf}_feature1"]
}
@ -48,7 +48,7 @@
Start a new paragraph with two newlines.
'''
milestone: V2
stage: V2
// testplan entry with multiple tests added
tests: ["foo_feature2_type1",
"foo_feature2_type2",

View file

@ -568,7 +568,7 @@ def mk_path(path):
try:
Path(path).mkdir(parents=True, exist_ok=True)
except PermissionError as e:
log.fatal("Failed to create dirctory {}:\n{}.".format(path, e))
log.fatal("Failed to create directory {}:\n{}.".format(path, e))
sys.exit(1)
@ -597,17 +597,24 @@ def clean_odirs(odir, max_odirs, ts_format=TS_FORMAT):
remain after deletion.
"""
odir = Path(odir)
if os.path.exists(odir):
# If output directory exists, back it up.
ts = datetime.fromtimestamp(os.stat(odir).st_ctime).strftime(ts_format)
shutil.move(odir, "{}_{}".format(odir, ts))
# Prior to Python 3.9, shutil may run into an error when passing in
# Path objects (see https://bugs.python.org/issue32689). While this
# has been fixed in Python 3.9, string casts are added so that this
# also works with older versions.
shutil.move(str(odir), str(odir.with_name(ts)))
# Get list of past output directories sorted by creation time.
pdir = Path(odir).resolve().parent
pdir = odir.resolve().parent
if not pdir.exists():
return []
dirs = sorted([old for old in pdir.iterdir() if old.is_dir()],
dirs = sorted([old for old in pdir.iterdir() if (old.is_dir() and
old != 'summary')],
key=os.path.getctime,
reverse=True)
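The string casts added here sidestep https://bugs.python.org/issue32689, where `shutil.move()` mishandled `pathlib.Path` sources before Python 3.9. The pattern in isolation:

import shutil
from pathlib import Path

odir = Path("scratch/run")
if odir.exists():
    backup = odir.with_name("2022.08.24_13.03.04")  # illustrative timestamp
    # Cast Path objects to str so this also works on Python < 3.9.
    shutil.move(str(odir), str(backup))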

View file

@ -169,7 +169,7 @@ Please see description for more details.
The tool generates not only the UVM environment, but also the base test,
testbench, top level fusesoc core file with sim target, Makefile that already
includes the smoke and CSR test suite and more. With just a few tweaks, this
enables the user to reach the V1 milestone much quicker. Let's take `i2c_host`
enables the user to reach the V1 stage much quicker. Let's take `i2c_host`
as the argument passed for the name of the IP. The following is the list of
files generated with a brief description of their contents:

View file

@ -20,13 +20,13 @@
**Checks**:
- TBD
'''
milestone: V1
stage: V1
tests: ["${name}_smoke"]
}
{
name: feature1
desc: '''Add more test entries here like above.'''
milestone: V1
stage: V1
tests: []
}
]