From f69c6fbabda4a9a1d95fba3a1b2522e751929626 Mon Sep 17 00:00:00 2001 From: udinator Date: Fri, 27 Mar 2020 11:02:47 -0700 Subject: [PATCH] [dv] initial icache testbench (#711) * [dv] add vendor .hjson files for dv tools Signed-off-by: Udi Jonnalagadda * Update common_ifs to lowRISC/opentitan@0d7f7ac7 Update code from subdir hw/dv/sv/common_ifs in upstream repository https://github.com/lowRISC/opentitan to revision 0d7f7ac755d4e00811257027dd814edb2afca050 Signed-off-by: Udi Jonnalagadda * Update csr_utils to lowRISC/opentitan@0d7f7ac7 Update code from subdir hw/dv/sv/csr_utils in upstream repository https://github.com/lowRISC/opentitan to revision 0d7f7ac755d4e00811257027dd814edb2afca050 Signed-off-by: Udi Jonnalagadda * Update dv_lib to lowRISC/opentitan@0d7f7ac7 Update code from subdir hw/dv/sv/dv_lib in upstream repository https://github.com/lowRISC/opentitan to revision 0d7f7ac755d4e00811257027dd814edb2afca050 Signed-off-by: Udi Jonnalagadda * Update dvsim to lowRISC/opentitan@0d7f7ac7 Update code from subdir util/dvsim in upstream repository https://github.com/lowRISC/opentitan to revision 0d7f7ac755d4e00811257027dd814edb2afca050 Signed-off-by: Udi Jonnalagadda * Update uvmdvgen to lowRISC/opentitan@0d7f7ac7 Update code from subdir util/uvmdvgen in upstream repository https://github.com/lowRISC/opentitan to revision 0d7f7ac755d4e00811257027dd814edb2afca050 Signed-off-by: Udi Jonnalagadda * Update dv_utils to lowRISC/opentitan@0d7f7ac7 Update code from subdir hw/dv/sv/dv_utils in upstream repository https://github.com/lowRISC/opentitan to revision 0d7f7ac755d4e00811257027dd814edb2afca050 Signed-off-by: Udi Jonnalagadda * [dv] initial icache testbench Signed-off-by: Udi Jonnalagadda * [dv] add top_pkg and its core file to icache/dv Signed-off-by: Udi Jonnalagadda * [dv] update ibex_core and ibex_icache corefile dependencies Signed-off-by: Udi Jonnalagadda * [dv] add .vpd support for wave-dumping Signed-off-by: Udi Jonnalagadda --- doc/verification.rst | 98 +- 
.../ibex_mem_intf_agent.core | 27 + .../ibex_mem_intf_agent_pkg.sv | 1 - .../core_ibex/common/mem_model/mem_model.core | 20 + dv/uvm/data/common_modes.hjson | 38 + dv/uvm/data/common_project_cfg.hjson | 29 + dv/uvm/data/common_sim_cfg.hjson | 108 +++ dv/uvm/data/fusesoc.hjson | 12 + dv/uvm/data/sim.mk | 95 ++ dv/uvm/data/vcs/cover.cfg | 5 + dv/uvm/data/vcs/vcs.hjson | 156 ++++ dv/uvm/data/vcs/vcs_fsdb.tcl | 44 + dv/uvm/data/vcs/xprop.cfg | 4 + dv/uvm/data/xcelium/xcelium.hjson | 84 ++ dv/uvm/icache/data/ibex_icache_testplan.hjson | 30 + dv/uvm/icache/doc/ibex_icache_dv_plan.md | 99 ++ dv/uvm/icache/doc/tb.svg | 1 + dv/uvm/icache/dv/env/ibex_icache_env.core | 29 + dv/uvm/icache/dv/env/ibex_icache_env.sv | 41 + dv/uvm/icache/dv/env/ibex_icache_env_cfg.sv | 23 + dv/uvm/icache/dv/env/ibex_icache_env_cov.sv | 30 + dv/uvm/icache/dv/env/ibex_icache_env_pkg.sv | 33 + .../icache/dv/env/ibex_icache_scoreboard.sv | 67 ++ .../dv/env/ibex_icache_virtual_sequencer.sv | 16 + .../dv/env/seq_lib/ibex_icache_base_vseq.sv | 32 + .../dv/env/seq_lib/ibex_icache_common_vseq.sv | 18 + .../dv/env/seq_lib/ibex_icache_sanity_vseq.sv | 15 + .../dv/env/seq_lib/ibex_icache_vseq_list.sv | 7 + dv/uvm/icache/dv/ibex_icache_agent/README.md | 3 + .../ibex_icache_agent/ibex_icache_agent.core | 28 + .../dv/ibex_icache_agent/ibex_icache_agent.sv | 25 + .../ibex_icache_agent_cfg.sv | 15 + .../ibex_icache_agent_cov.sv | 18 + .../ibex_icache_agent_pkg.sv | 37 + .../ibex_icache_agent/ibex_icache_driver.sv | 37 + .../dv/ibex_icache_agent/ibex_icache_if.sv | 11 + .../dv/ibex_icache_agent/ibex_icache_item.sv | 14 + .../ibex_icache_agent/ibex_icache_monitor.sv | 43 + .../seq_lib/ibex_icache_base_seq.sv | 18 + .../seq_lib/ibex_icache_seq_list.sv | 5 + dv/uvm/icache/dv/ibex_icache_sim.core | 25 + dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson | 58 ++ dv/uvm/icache/dv/tb/tb.sv | 41 + .../icache/dv/tests/ibex_icache_base_test.sv | 26 + dv/uvm/icache/dv/tests/ibex_icache_test.core | 19 + 
.../icache/dv/tests/ibex_icache_test_pkg.sv | 22 + dv/uvm/icache/dv/top_pkg.core | 20 + dv/uvm/icache/dv/top_pkg.sv | 25 + ibex_core.core | 2 +- ibex_icache.core | 20 + vendor/lowrisc_common_ifs.lock.hjson | 15 + vendor/lowrisc_common_ifs.vendor.hjson | 13 + vendor/lowrisc_csr_utils.lock.hjson | 15 + vendor/lowrisc_csr_utils.vendor.hjson | 13 + vendor/lowrisc_dv_lib.lock.hjson | 15 + vendor/lowrisc_dv_lib.vendor.hjson | 13 + vendor/lowrisc_dv_utils.lock.hjson | 15 + vendor/lowrisc_dv_utils.vendor.hjson | 14 + vendor/lowrisc_dvsim.lock.hjson | 15 + vendor/lowrisc_dvsim.vendor.hjson | 13 + vendor/lowrisc_ip/common_ifs/clk_if.sv | 29 + vendor/lowrisc_ip/common_ifs/clk_rst_if.sv | 224 +++++ vendor/lowrisc_ip/common_ifs/common_ifs.core | 20 + vendor/lowrisc_ip/common_ifs/index.md | 81 ++ vendor/lowrisc_ip/common_ifs/pins_if.sv | 96 ++ vendor/lowrisc_ip/common_ifs/pins_if.svg | 1 + vendor/lowrisc_ip/common_ifs/pins_ifs.core | 17 + vendor/lowrisc_ip/csr_utils/README.md | 159 ++++ vendor/lowrisc_ip/csr_utils/csr_excl_item.sv | 111 +++ vendor/lowrisc_ip/csr_utils/csr_seq_lib.sv | 477 ++++++++++ vendor/lowrisc_ip/csr_utils/csr_utils.core | 21 + vendor/lowrisc_ip/csr_utils/csr_utils_pkg.sv | 571 ++++++++++++ vendor/lowrisc_ip/dv_lib/README.md | 56 ++ vendor/lowrisc_ip/dv_lib/dv_base_agent.sv | 60 ++ vendor/lowrisc_ip/dv_lib/dv_base_agent_cfg.sv | 20 + vendor/lowrisc_ip/dv_lib/dv_base_agent_cov.sv | 12 + vendor/lowrisc_ip/dv_lib/dv_base_driver.sv | 32 + vendor/lowrisc_ip/dv_lib/dv_base_env.sv | 60 ++ vendor/lowrisc_ip/dv_lib/dv_base_env_cfg.sv | 94 ++ vendor/lowrisc_ip/dv_lib/dv_base_env_cov.sv | 42 + vendor/lowrisc_ip/dv_lib/dv_base_mem.sv | 16 + vendor/lowrisc_ip/dv_lib/dv_base_monitor.sv | 35 + vendor/lowrisc_ip/dv_lib/dv_base_reg.sv | 69 ++ vendor/lowrisc_ip/dv_lib/dv_base_reg_block.sv | 56 ++ vendor/lowrisc_ip/dv_lib/dv_base_reg_field.sv | 42 + vendor/lowrisc_ip/dv_lib/dv_base_reg_map.sv | 9 + .../lowrisc_ip/dv_lib/dv_base_scoreboard.sv | 80 ++ 
vendor/lowrisc_ip/dv_lib/dv_base_seq.sv | 25 + vendor/lowrisc_ip/dv_lib/dv_base_sequencer.sv | 13 + vendor/lowrisc_ip/dv_lib/dv_base_test.sv | 76 ++ .../dv_lib/dv_base_virtual_sequencer.sv | 14 + vendor/lowrisc_ip/dv_lib/dv_base_vseq.sv | 187 ++++ vendor/lowrisc_ip/dv_lib/dv_lib.core | 42 + vendor/lowrisc_ip/dv_lib/dv_lib_pkg.sv | 51 + vendor/lowrisc_ip/dv_utils/README.md | 0 vendor/lowrisc_ip/dv_utils/dv_macros.svh | 288 ++++++ .../lowrisc_ip/dv_utils/dv_report_server.sv | 87 ++ vendor/lowrisc_ip/dv_utils/dv_utils.core | 23 + vendor/lowrisc_ip/dv_utils/dv_utils_pkg.sv | 145 +++ vendor/lowrisc_ip/dvsim/Deploy.py | 873 ++++++++++++++++++ vendor/lowrisc_ip/dvsim/FlowCfg.py | 628 +++++++++++++ vendor/lowrisc_ip/dvsim/LintCfg.py | 218 +++++ vendor/lowrisc_ip/dvsim/Modes.py | 547 +++++++++++ vendor/lowrisc_ip/dvsim/OneShotCfg.py | 188 ++++ vendor/lowrisc_ip/dvsim/SimCfg.py | 570 ++++++++++++ vendor/lowrisc_ip/dvsim/__init__.py | 0 vendor/lowrisc_ip/dvsim/dvsim.py | 525 +++++++++++ vendor/lowrisc_ip/dvsim/style.css | 134 +++ vendor/lowrisc_ip/dvsim/testplanner.py | 46 + vendor/lowrisc_ip/dvsim/testplanner/README.md | 237 +++++ .../lowrisc_ip/dvsim/testplanner/__init__.py | 0 .../dvsim/testplanner/class_defs.py | 340 +++++++ .../examples/common_testplan.hjson | 23 + .../dvsim/testplanner/examples/foo_dv_plan.md | 7 + .../examples/foo_regr_results.hjson | 107 +++ .../testplanner/examples/foo_testplan.hjson | 58 ++ .../dvsim/testplanner/testplan_utils.py | 171 ++++ vendor/lowrisc_ip/dvsim/utils.py | 299 ++++++ vendor/lowrisc_ip/uvmdvgen/Makefile.tpl | 43 + vendor/lowrisc_ip/uvmdvgen/README.md | 346 +++++++ vendor/lowrisc_ip/uvmdvgen/README.md.tpl | 3 + vendor/lowrisc_ip/uvmdvgen/__init__.py | 0 vendor/lowrisc_ip/uvmdvgen/agent.core.tpl | 33 + vendor/lowrisc_ip/uvmdvgen/agent.sv.tpl | 29 + vendor/lowrisc_ip/uvmdvgen/agent_cfg.sv.tpl | 15 + vendor/lowrisc_ip/uvmdvgen/agent_cov.sv.tpl | 18 + vendor/lowrisc_ip/uvmdvgen/agent_pkg.sv.tpl | 49 + 
vendor/lowrisc_ip/uvmdvgen/base_seq.sv.tpl | 18 + vendor/lowrisc_ip/uvmdvgen/base_test.sv.tpl | 32 + vendor/lowrisc_ip/uvmdvgen/base_vseq.sv.tpl | 39 + vendor/lowrisc_ip/uvmdvgen/bind.sv.tpl | 18 + vendor/lowrisc_ip/uvmdvgen/common_vseq.sv.tpl | 21 + vendor/lowrisc_ip/uvmdvgen/cov_excl.el.tpl | 6 + .../lowrisc_ip/uvmdvgen/device_driver.sv.tpl | 26 + vendor/lowrisc_ip/uvmdvgen/driver.sv.tpl | 37 + vendor/lowrisc_ip/uvmdvgen/dv_plan.md.tpl | 130 +++ vendor/lowrisc_ip/uvmdvgen/env.core.tpl | 39 + vendor/lowrisc_ip/uvmdvgen/env.sv.tpl | 52 ++ vendor/lowrisc_ip/uvmdvgen/env_cfg.sv.tpl | 52 ++ vendor/lowrisc_ip/uvmdvgen/env_cov.sv.tpl | 38 + vendor/lowrisc_ip/uvmdvgen/env_pkg.sv.tpl | 48 + vendor/lowrisc_ip/uvmdvgen/gen_agent.py | 62 ++ vendor/lowrisc_ip/uvmdvgen/gen_env.py | 70 ++ vendor/lowrisc_ip/uvmdvgen/host_driver.sv.tpl | 37 + vendor/lowrisc_ip/uvmdvgen/if.sv.tpl | 11 + vendor/lowrisc_ip/uvmdvgen/item.sv.tpl | 14 + vendor/lowrisc_ip/uvmdvgen/monitor.sv.tpl | 43 + vendor/lowrisc_ip/uvmdvgen/sanity_vseq.sv.tpl | 15 + vendor/lowrisc_ip/uvmdvgen/scoreboard.sv.tpl | 116 +++ vendor/lowrisc_ip/uvmdvgen/seq_list.sv.tpl | 5 + vendor/lowrisc_ip/uvmdvgen/sim.core.tpl | 28 + vendor/lowrisc_ip/uvmdvgen/sim_cfg.hjson.tpl | 80 ++ vendor/lowrisc_ip/uvmdvgen/tb.sv.tpl | 90 ++ vendor/lowrisc_ip/uvmdvgen/test.core.tpl | 19 + vendor/lowrisc_ip/uvmdvgen/test_pkg.sv.tpl | 26 + vendor/lowrisc_ip/uvmdvgen/testplan.hjson.tpl | 34 + vendor/lowrisc_ip/uvmdvgen/uvmdvgen.py | 148 +++ .../uvmdvgen/virtual_sequencer.sv.tpl | 21 + vendor/lowrisc_ip/uvmdvgen/vseq_list.sv.tpl | 7 + vendor/lowrisc_uvmdvgen.lock.hjson | 15 + vendor/lowrisc_uvmdvgen.vendor.hjson | 13 + 161 files changed, 12145 insertions(+), 28 deletions(-) create mode 100644 dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent.core create mode 100644 dv/uvm/core_ibex/common/mem_model/mem_model.core create mode 100644 dv/uvm/data/common_modes.hjson create mode 100644 dv/uvm/data/common_project_cfg.hjson create mode 100644 
dv/uvm/data/common_sim_cfg.hjson create mode 100644 dv/uvm/data/fusesoc.hjson create mode 100644 dv/uvm/data/sim.mk create mode 100644 dv/uvm/data/vcs/cover.cfg create mode 100644 dv/uvm/data/vcs/vcs.hjson create mode 100644 dv/uvm/data/vcs/vcs_fsdb.tcl create mode 100644 dv/uvm/data/vcs/xprop.cfg create mode 100644 dv/uvm/data/xcelium/xcelium.hjson create mode 100644 dv/uvm/icache/data/ibex_icache_testplan.hjson create mode 100644 dv/uvm/icache/doc/ibex_icache_dv_plan.md create mode 100644 dv/uvm/icache/doc/tb.svg create mode 100644 dv/uvm/icache/dv/env/ibex_icache_env.core create mode 100644 dv/uvm/icache/dv/env/ibex_icache_env.sv create mode 100644 dv/uvm/icache/dv/env/ibex_icache_env_cfg.sv create mode 100644 dv/uvm/icache/dv/env/ibex_icache_env_cov.sv create mode 100644 dv/uvm/icache/dv/env/ibex_icache_env_pkg.sv create mode 100644 dv/uvm/icache/dv/env/ibex_icache_scoreboard.sv create mode 100644 dv/uvm/icache/dv/env/ibex_icache_virtual_sequencer.sv create mode 100644 dv/uvm/icache/dv/env/seq_lib/ibex_icache_base_vseq.sv create mode 100644 dv/uvm/icache/dv/env/seq_lib/ibex_icache_common_vseq.sv create mode 100644 dv/uvm/icache/dv/env/seq_lib/ibex_icache_sanity_vseq.sv create mode 100644 dv/uvm/icache/dv/env/seq_lib/ibex_icache_vseq_list.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/README.md create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.core create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cfg.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cov.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_pkg.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_driver.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_if.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_item.sv create mode 100644 
dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_monitor.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_base_seq.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_seq_list.sv create mode 100644 dv/uvm/icache/dv/ibex_icache_sim.core create mode 100644 dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson create mode 100644 dv/uvm/icache/dv/tb/tb.sv create mode 100644 dv/uvm/icache/dv/tests/ibex_icache_base_test.sv create mode 100644 dv/uvm/icache/dv/tests/ibex_icache_test.core create mode 100644 dv/uvm/icache/dv/tests/ibex_icache_test_pkg.sv create mode 100644 dv/uvm/icache/dv/top_pkg.core create mode 100644 dv/uvm/icache/dv/top_pkg.sv create mode 100644 ibex_icache.core create mode 100644 vendor/lowrisc_common_ifs.lock.hjson create mode 100644 vendor/lowrisc_common_ifs.vendor.hjson create mode 100644 vendor/lowrisc_csr_utils.lock.hjson create mode 100644 vendor/lowrisc_csr_utils.vendor.hjson create mode 100644 vendor/lowrisc_dv_lib.lock.hjson create mode 100644 vendor/lowrisc_dv_lib.vendor.hjson create mode 100644 vendor/lowrisc_dv_utils.lock.hjson create mode 100644 vendor/lowrisc_dv_utils.vendor.hjson create mode 100644 vendor/lowrisc_dvsim.lock.hjson create mode 100644 vendor/lowrisc_dvsim.vendor.hjson create mode 100644 vendor/lowrisc_ip/common_ifs/clk_if.sv create mode 100644 vendor/lowrisc_ip/common_ifs/clk_rst_if.sv create mode 100644 vendor/lowrisc_ip/common_ifs/common_ifs.core create mode 100644 vendor/lowrisc_ip/common_ifs/index.md create mode 100644 vendor/lowrisc_ip/common_ifs/pins_if.sv create mode 100644 vendor/lowrisc_ip/common_ifs/pins_if.svg create mode 100644 vendor/lowrisc_ip/common_ifs/pins_ifs.core create mode 100644 vendor/lowrisc_ip/csr_utils/README.md create mode 100644 vendor/lowrisc_ip/csr_utils/csr_excl_item.sv create mode 100644 vendor/lowrisc_ip/csr_utils/csr_seq_lib.sv create mode 100644 vendor/lowrisc_ip/csr_utils/csr_utils.core create mode 100644 vendor/lowrisc_ip/csr_utils/csr_utils_pkg.sv 
create mode 100644 vendor/lowrisc_ip/dv_lib/README.md create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_agent.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_agent_cfg.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_agent_cov.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_driver.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_env.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_env_cfg.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_env_cov.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_mem.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_monitor.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_reg.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_reg_block.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_reg_field.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_reg_map.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_scoreboard.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_seq.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_sequencer.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_test.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_virtual_sequencer.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_base_vseq.sv create mode 100644 vendor/lowrisc_ip/dv_lib/dv_lib.core create mode 100644 vendor/lowrisc_ip/dv_lib/dv_lib_pkg.sv create mode 100644 vendor/lowrisc_ip/dv_utils/README.md create mode 100644 vendor/lowrisc_ip/dv_utils/dv_macros.svh create mode 100644 vendor/lowrisc_ip/dv_utils/dv_report_server.sv create mode 100644 vendor/lowrisc_ip/dv_utils/dv_utils.core create mode 100644 vendor/lowrisc_ip/dv_utils/dv_utils_pkg.sv create mode 100644 vendor/lowrisc_ip/dvsim/Deploy.py create mode 100644 vendor/lowrisc_ip/dvsim/FlowCfg.py create mode 100644 vendor/lowrisc_ip/dvsim/LintCfg.py create mode 100644 vendor/lowrisc_ip/dvsim/Modes.py create mode 100644 vendor/lowrisc_ip/dvsim/OneShotCfg.py create mode 100644 vendor/lowrisc_ip/dvsim/SimCfg.py create 
mode 100644 vendor/lowrisc_ip/dvsim/__init__.py create mode 100755 vendor/lowrisc_ip/dvsim/dvsim.py create mode 100644 vendor/lowrisc_ip/dvsim/style.css create mode 100755 vendor/lowrisc_ip/dvsim/testplanner.py create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/README.md create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/__init__.py create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/class_defs.py create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/examples/common_testplan.hjson create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/examples/foo_dv_plan.md create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/examples/foo_regr_results.hjson create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/examples/foo_testplan.hjson create mode 100644 vendor/lowrisc_ip/dvsim/testplanner/testplan_utils.py create mode 100644 vendor/lowrisc_ip/dvsim/utils.py create mode 100644 vendor/lowrisc_ip/uvmdvgen/Makefile.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/README.md create mode 100644 vendor/lowrisc_ip/uvmdvgen/README.md.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/__init__.py create mode 100644 vendor/lowrisc_ip/uvmdvgen/agent.core.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/agent.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/agent_cfg.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/agent_cov.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/agent_pkg.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/base_seq.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/base_test.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/base_vseq.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/bind.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/common_vseq.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/cov_excl.el.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/device_driver.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/driver.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/dv_plan.md.tpl create mode 100644 
vendor/lowrisc_ip/uvmdvgen/env.core.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/env.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/env_cfg.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/env_cov.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/env_pkg.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/gen_agent.py create mode 100644 vendor/lowrisc_ip/uvmdvgen/gen_env.py create mode 100644 vendor/lowrisc_ip/uvmdvgen/host_driver.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/if.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/item.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/monitor.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/sanity_vseq.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/scoreboard.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/seq_list.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/sim.core.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/sim_cfg.hjson.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/tb.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/test.core.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/test_pkg.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/testplan.hjson.tpl create mode 100755 vendor/lowrisc_ip/uvmdvgen/uvmdvgen.py create mode 100644 vendor/lowrisc_ip/uvmdvgen/virtual_sequencer.sv.tpl create mode 100644 vendor/lowrisc_ip/uvmdvgen/vseq_list.sv.tpl create mode 100644 vendor/lowrisc_uvmdvgen.lock.hjson create mode 100644 vendor/lowrisc_uvmdvgen.vendor.hjson diff --git a/doc/verification.rst b/doc/verification.rst index 04badcd2..4821f0bd 100644 --- a/doc/verification.rst +++ b/doc/verification.rst @@ -1,17 +1,20 @@ Verification ============ -Overview --------- +Ibex Core +--------- -This is a SV/UVM testbench for verification of the Ibex core. +Overview +^^^^^^^^ + +This is a SV/UVM testbench for verification of the Ibex core, located in `dv/uvm/core_ibex`. 
At a high level, this testbench uses the open source `RISCV-DV random instruction generator `_ to generate compiled instruction binaries, loads them into a simple memory model, stimulates the Ibex core to run this program in memory, and then compares the core trace log against a golden model ISS trace log to check for correctness of execution. Testbench Architecture ----------------------- +^^^^^^^^^^^^^^^^^^^^^^ As previously mentioned, this testbench has been constructed based on its usage of the RISCV-DV random instruction generator developed by Google. @@ -23,28 +26,28 @@ A block diagram of the testbench is below. Architecture of the UVM testbench for Ibex core Memory Interface Agents -~~~~~~~~~~~~~~~~~~~~~~~ +""""""""""""""""""""""" -The code can be found in the `dv/uvm/common/ibex_mem_intf_agent -`_ directory. +The code can be found in the `dv/uvm/core_ibex/common/ibex_mem_intf_agent +`_ directory. Two of these agents are instantiated within the testbench, one for the instruction fetch interface, and the second for the LSU interface. These agents run slave sequences that wait for memory requests from the core, and then grant the requests for instructions or for data. Interrupt Interface Agent -~~~~~~~~~~~~~~~~~~~~~~~~~ +""""""""""""""""""""""""" The code can be found in the -`dv/uvm/common/irq_agent `_ directory. +`dv/uvm/core_ibex/common/irq_agent `_ directory. This agent is used to drive stimulus onto the Ibex core's interrupt pins randomly during test execution. Memory Model -~~~~~~~~~~~~ +"""""""""""" The code can be found in the -`dv/uvm/common/mem_model `_ +`dv/uvm/core_ibex/common/mem_model `_ directory. The testbench instantiates a single instance of this memory model that it loads the compiled assembly test program into at the beginning of each test. @@ -52,10 +55,10 @@ This acts as a unified instruction/data memory that serves all requests from bot memory interface agents. 
Test and Sequence Library -~~~~~~~~~~~~~~~~~~~~~~~~~ +""""""""""""""""""""""""" The code can be found in the -`dv/uvm/tests `_ directory. +`dv/uvm/core_ibex/tests `_ directory. The tests here are the main sources of external stimulus generation and checking for this testbench, as the memory interface slave sequences simply serve the core's memory requests. The tests here are all extended from ``core_ibex_base_test``, and coordinate the entire flow for a @@ -64,21 +67,21 @@ checking the Ibex core status during the test and dealing with test timeouts. The sequences here are used to drive interrupt and debug stimulus into the core. Testplan -~~~~~~~~ +"""""""" The goal of this bench is to fully verify the Ibex core with 100% coverage. This includes testing all RV32IMC instructions, privileged spec compliance, exception and interrupt testing, Debug Mode operation etc. -The complete test list can be found in the file `dv/uvm/riscv_dv_extension/testlist.yaml -`_. +The complete test list can be found in the file `dv/uvm/core_ibex/riscv_dv_extension/testlist.yaml +`_. Please note that verification is still a work in progress. Getting Started ---------------- +^^^^^^^^^^^^^^ Prerequisites & Environment Setup -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""""""""""""""""""""""""""""""""" In order to run the co-simulation flow, you'll need: @@ -119,7 +122,7 @@ you have installed the corresponding instruction set simulator) .. _riscv-toolchain-releases: https://github.com/lowRISC/lowrisc-toolchains/releases End-to-end RTL/ISS co-simulation flow -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +""""""""""""""""""""""""""""""""""""" .. figure:: images/dv-flow.png :alt: RTL/ISS co-simulation flow chart @@ -143,12 +146,12 @@ any analysis that is required to increase verification effectiveness. This mechanism is explained in detail at https://github.com/google/riscv-dv/blob/master/HANDSHAKE.md. As a sidenote, the signature address that this testbench uses for the handshaking is ``0x8ffffffc``. 
Additionally, as is mentioned in the RISCV-DV documentation of this handshake, a small set of API -tasks are provided in `dv/uvm/tests/core_ibex_base_test.sv -`_ to enable easy +tasks are provided in `dv/uvm/core_ibex/tests/core_ibex_base_test.sv +`_ to enable easy and efficient integration and usage of this mechanism in this test environment. To see how this handshake is used during real simulations, look in -`dv/uvm/tests/core_ibex_test_lib.sv -`_. +`dv/uvm/core_ibex/tests/core_ibex_test_lib.sv +`_. As can be seen, this mechanism is extensively used to provide runtime verification for situations involving external debug requests, interrupt assertions, and memory faults. To add another layer of correctness checking to the checking already provided by the handshake @@ -203,12 +206,55 @@ The entirety of this flow is controlled by the Makefile found at make COV=1 Run with a different RTL simulator -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +"""""""""""""""""""""""""""""""""" -You can add any compile/runtime options in `dv/uvm/yaml/simulator.yaml -`_. +You can add any compile/runtime options in `dv/uvm/core_ibex/yaml/simulator.yaml +`_. .. code-block:: bash # Use the new RTL simulator to run make ... SIMULATOR=xxx + + +Instruction Cache +----------------- + +Overview +^^^^^^^^ + +NOTE: Icache verification, as well as documentation, is still in very early stages. + +Due to the complexity of the instruction cache, a separate testbench is used to +ensure that full verification and coverage closure is performed on this module. +This testbench is located at `dv/uvm/icache/dv +`_. + +As Icache verification is being carried out as part of the OpenTitan open-source +project, the testbench derives from the `dv_lib UVM class library +`_, which is a set of extended UVM +classes that provides basic UVM testbench functionality and components. + +This DV environment will be compiled and simulated using the `dvsim simulation tool +`_. 
+The master ``.hjson`` file that controls simulation with ``dvsim`` can be found +at `dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson +`_. +The associated testplan ``.hjson`` file is located at `dv/uvm/icache/data/ibex_icache_testplan.hjson +`_. +As this testbench is still in its infancy, it is currently only able to be compiled, as no tests or +sequences are implemented, nor are there any entries in the testplan file. +To build the testbench locally using the VCS simulator, run the following command from the root of +the Ibex repository: + +.. code-block:: bash + + ./vendor/lowrisc_ip/dvsim/dvsim.py dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson --build-only + --skip-ral --purge --sr sim_out + +Specify the intended output directory using either the ``--sr`` or ``-scratch-root`` option. +The ``--skip-ral`` option is mandatory for building/simulating the Icache testbench, as it does not +have any CSRs, excluding this option will lead to build errors. +``--purge`` directs the tool to ``rm -rf`` the output directory before running the tool, this can be +removed if not desired. + diff --git a/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent.core b/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent.core new file mode 100644 index 00000000..bc93fd37 --- /dev/null +++ b/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent.core @@ -0,0 +1,27 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:ibex_mem_intf_agent:0.1" +description: "IBEX DV UVM environment" +filesets: + files_dv: + depend: + - lowrisc:dv:mem_model + files: + - ibex_mem_intf.sv + - ibex_mem_intf_agent_pkg.sv + - ibex_mem_intf_master_agent.sv: {is_include_file: true} + - ibex_mem_intf_master_driver.sv: {is_include_file: true} + - ibex_mem_intf_monitor.sv: {is_include_file: true} + - ibex_mem_intf_seq_item.sv: {is_include_file: true} + - ibex_mem_intf_slave_agent.sv: {is_include_file: true} + - ibex_mem_intf_slave_driver.sv: {is_include_file: true} + - ibex_mem_intf_slave_seq_lib.sv: {is_include_file: true} + - ibex_mem_intf_slave_sequencer.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent_pkg.sv b/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent_pkg.sv index e4a65e68..d6140938 100644 --- a/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent_pkg.sv +++ b/dv/uvm/core_ibex/common/ibex_mem_intf_agent/ibex_mem_intf_agent_pkg.sv @@ -2,7 +2,6 @@ // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 -`include "ibex_mem_intf.sv" package ibex_mem_intf_agent_pkg; diff --git a/dv/uvm/core_ibex/common/mem_model/mem_model.core b/dv/uvm/core_ibex/common/mem_model/mem_model.core new file mode 100644 index 00000000..890bab60 --- /dev/null +++ b/dv/uvm/core_ibex/common/mem_model/mem_model.core @@ -0,0 +1,20 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:mem_model" +description: "DV Memory Model" + +filesets: + files_dv: + depend: + - lowrisc:dv:dv_utils + files: + - mem_model_pkg.sv + - mem_model.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/dv/uvm/data/common_modes.hjson b/dv/uvm/data/common_modes.hjson new file mode 100644 index 00000000..2779acb6 --- /dev/null +++ b/dv/uvm/data/common_modes.hjson @@ -0,0 +1,38 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + // Sim modes are collection of build_opts and run_opts + // These are only set on the command line + // These are different from the build modes in the sense that these collection of + // options are appended to actual build_modes + build_modes: [ + { + name: waves + is_sim_mode: 1 + en_build_modes: ["{tool}_waves"] + } + { + name: cov + is_sim_mode: 1 + en_build_modes: ["{tool}_cov"] + } + { + name: profile + is_sim_mode: 1 + en_build_modes: ["{tool}_profile"] + } + { + name: xprop + is_sim_mode: 1 + en_build_modes: ["{tool}_xprop"] + } + ] + + run_modes: [ + { + name: uvm_trace + run_opts: ["+UVM_PHASE_TRACE", "+UVM_CONFIG_DB_TRACE", "+UVM_OBJECTION_TRACE"] + } + ] +} diff --git a/dv/uvm/data/common_project_cfg.hjson b/dv/uvm/data/common_project_cfg.hjson new file mode 100644 index 00000000..f7160a9b --- /dev/null +++ b/dv/uvm/data/common_project_cfg.hjson @@ -0,0 +1,29 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + project: opentitan + doc_server: docs.opentitan.org + results_server: reports.opentitan.org + + // Default directory structure for the output + scratch_base_path: "{scratch_root}/{dut}.{flow}.{tool}" + scratch_path: "{scratch_base_path}/{branch}" + tool_srcs_dir: "{scratch_path}/{tool}" + + // Results server stuff - indicate what command to use to copy over the results. + // Workaround for gsutil to fall back to using python2.7. + results_server_prefix: "gs://" + results_server_url_prefix: "https://" + results_server_cmd: "CLOUDSDK_PYTHON=/usr/bin/python2.7 /usr/bin/gsutil" + results_server_css_path: "{results_server_url_prefix}{results_server}/css/style.css" + + results_server_path: "{results_server_prefix}{results_server}/{rel_path}" + results_server_dir: "{results_server_path}/latest" + + results_server_html: "results.html" + results_server_page: "{results_server_dir}/{results_server_html}" + + results_summary_server_html: "summary.html" + results_summary_server_page: "{results_server_path}/{results_summary_server_html}" +} diff --git a/dv/uvm/data/common_sim_cfg.hjson b/dv/uvm/data/common_sim_cfg.hjson new file mode 100644 index 00000000..541775c9 --- /dev/null +++ b/dv/uvm/data/common_sim_cfg.hjson @@ -0,0 +1,108 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + project: opentitan + doc_server: docs.opentitan.org + results_server: reports.opentitan.org + + flow: sim + flow_makefile: "{proj_root}/dv/uvm/data/sim.mk" + + import_cfgs: ["{proj_root}/dv/uvm/data/common_project_cfg.hjson", + "{proj_root}/dv/uvm/data/common_modes.hjson", + "{proj_root}/dv/uvm/data/fusesoc.hjson", + "{proj_root}/dv/uvm/data/{tool}/{tool}.hjson"] + + // Default directory structure for the output + build_dir: "{scratch_path}/{build_mode}" + run_dir_name: "{index}.{test}" + run_dir: "{scratch_path}/{run_dir_name}/out" + sw_build_dir: "" + sw_root_dir: "" + + // pass and fail patterns + build_pass_patterns: [] + build_fail_patterns: [] + run_pass_patterns: ["^TEST PASSED (UVM_)?CHECKS$"] + run_fail_patterns: ["^UVM_ERROR\\s[^:].*$", + "^UVM_FATAL\\s[^:].*$", + "^Assert failed: ", + "^\\s*Offending '.*'", + "^TEST FAILED (UVM_)?CHECKS$"] + + // Default TileLink widths + tl_aw: 32 + tl_dw: 32 + tl_dbw: 4 + + // Default UVM verbosity settings + n: UVM_NONE + l: UVM_LOW + m: UVM_MEDIUM + h: UVM_HIGH + d: UVM_DEBUG + + // Default waves dump settings + dump_file: waves.{dump} + + // Top level simulation entities. + sim_tops: ["-top {tb}"] + + // Default build and run opts + build_opts: [// List multiple tops for the simulation + "{sim_tops}", + // Standard UVM defines + "+define+UVM", + "+define+UVM_NO_DEPRECATED", + "+define+UVM_REGEX_NO_DPI", + "+define+UVM_REG_ADDR_WIDTH={tl_aw}", + "+define+UVM_REG_DATA_WIDTH={tl_dw}", + "+define+UVM_REG_BYTENABLE_WIDTH={tl_dbw}"] + + run_opts: ["+UVM_NO_RELNOTES", + "+UVM_VERBOSITY={verbosity}"] + + // Default list of things to export to shell + exports: [ + DUMP_FILE: {dump_file} + WAVES: {waves} + DUT_TOP: {dut} + TB_TOP: {tb} + ] + + // Regressions are tests that can be grouped together and run in one shot + // By default, two regressions are made available - "all" and "nightly". Both + // run all available tests for the DUT. "nightly" enables coverage as well. 
+ // The 'tests' key is set to an empty list, which indicates "run everything". + // Test sets can enable sim modes, which are a set of build_opts and run_opts + // that are grouped together. These are appended to the build modes used by the + // tests. + regressions: [ + { + name: sanity + reseed: 1 + } + + { + name: all + tests: [] + } + + { + name: all_once + reseed: 1 + tests: [] + } + + { + name: nightly + tests: [] + en_sim_modes: ["cov"] + } + ] + + // Project defaults for VCS + vcs_cov_hier: "-cm_hier {tool_srcs_dir}/cover.cfg" + vcs_cov_excl_files: [] +} diff --git a/dv/uvm/data/fusesoc.hjson b/dv/uvm/data/fusesoc.hjson new file mode 100644 index 00000000..d2ca94bf --- /dev/null +++ b/dv/uvm/data/fusesoc.hjson @@ -0,0 +1,12 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + sv_flist_gen_cmd: fusesoc + fusesoc_core_: "{eval_cmd} echo \"{fusesoc_core}\" | tr ':' '_'" + sv_flist_gen_opts: ["--cores-root {proj_root}", + "run --target=sim --build-root={build_dir}", + "--setup {fusesoc_core}"] + sv_flist_gen_dir: "{build_dir}/sim-vcs" + sv_flist: "{sv_flist_gen_dir}/{fusesoc_core_}.scr" +} diff --git a/dv/uvm/data/sim.mk b/dv/uvm/data/sim.mk new file mode 100644 index 00000000..65727a27 --- /dev/null +++ b/dv/uvm/data/sim.mk @@ -0,0 +1,95 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 + +.DEFAULT_GOAL := all + +all: build run + +######################## +## RAL target ## +######################## +ral: +ifneq (${skip_ral},1) + mkdir -p ${gen_ral_pkg_dir} && \ + ${gen_ral_pkg_cmd} ${gen_ral_pkg_opts} +endif + + +############################### +## sim build and run targets ## +############################### +build: compile_result + +pre_compile: + @echo "[make]: pre_compile" + mkdir -p ${build_dir} && env | sort > ${build_dir}/env_vars + mkdir -p ${tool_srcs_dir} + cp -Ru ${tool_srcs} ${tool_srcs_dir}/. + +gen_sv_flist: pre_compile ral + @echo "[make]: gen_sv_flist" + cd ${build_dir} && ${sv_flist_gen_cmd} ${sv_flist_gen_opts} + +compile: gen_sv_flist + @echo "[make]: compile" + cd ${sv_flist_gen_dir} && ${build_cmd} ${build_opts} + +post_compile: compile + @echo "[make]: post_compile" + +compile_result: post_compile + @echo "[make]: compile_result" + +run: run_result + +pre_run: + @echo "[make]: pre_run" + mkdir -p ${run_dir} && env | sort > ${run_dir}/env_vars + +simulate: + @echo "[make]: simulate" + cd ${run_dir} && ${run_cmd} ${run_opts} + +post_run: simulate + @echo "[make]: post_run" + +run_result: post_run + @echo "[make]: run_result" + +####################### +## Load waves target ## +####################### +debug_waves: + ${debug_waves_cmd} ${debug_waves_opts} + +############################ +## coverage rated targets ## +############################ +# Merge coverage if there are multiple builds. +cov_merge: + ${cov_merge_cmd} ${cov_merge_opts} + +# Open coverage tool to review and create report or exclusion file. +cov_analyze: + ${cov_analyze_cmd} ${cov_analyze_opts} + +# Generate coverage reports. 
+cov_report: + ${cov_report_cmd} ${cov_report_opts} + +clean: + echo "[make]: clean" + rm -rf ${scratch_root}/${dut}/* + +.PHONY: build \ + run \ + reg \ + pre_compile \ + compile \ + post_compile \ + compile_result \ + pre_run \ + simulate \ + post_run \ + run_result diff --git a/dv/uvm/data/vcs/cover.cfg b/dv/uvm/data/vcs/cover.cfg new file mode 100644 index 00000000..875dd0f1 --- /dev/null +++ b/dv/uvm/data/vcs/cover.cfg @@ -0,0 +1,5 @@ ++tree tb.dut +begin tgl(portsonly) + -tree tb + +tree tb.dut 1 +end diff --git a/dv/uvm/data/vcs/vcs.hjson b/dv/uvm/data/vcs/vcs.hjson new file mode 100644 index 00000000..6e48b197 --- /dev/null +++ b/dv/uvm/data/vcs/vcs.hjson @@ -0,0 +1,156 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + build_cmd: "{job_prefix} vcs" + build_ex: "{build_dir}/simv" + run_cmd: "{job_prefix} {build_ex}" + + // Indicate the tool specific helper sources - these are copied over to the + // {tool_srcs_dir} before running the simulation. 
+ tool_srcs: ["{proj_root}/dv/uvm/data/vcs/*"] + + build_opts: ["-sverilog -full64 -licqueue -kdb -ntb_opts uvm-1.2", + "-timescale=1ns/1ps", + "-Mdir={build_ex}.csrc", + "-o {build_ex}", + "-f {sv_flist}", + "+incdir+{build_dir}", + // Turn on warnings for non-void functions called with return values ignored + "+warn=SV-NFIVC", + "+warn=noUII-L", + // Below option required for $error/$fatal system calls + "-assert svaext", + // Force DPI-C compilation in C99 mode + "-CFLAGS \"--std=c99\"", + // Without this magic LDFLAGS argument below, we get compile time errors with + // VCS on Google Linux machines that look like this: + // .../libvcsnew.so: undefined reference to `snpsReallocFunc' + // .../libvcsnew.so: undefined reference to `snpsCheckStrdupFunc' + // .../libvcsnew.so: undefined reference to `snpsGetMemBytes' + "-LDFLAGS \"-Wl,--no-as-needed\""] + + run_opts: ["-licqueue", + "-ucli -do {tool_srcs_dir}/vcs_fsdb.tcl", + "+ntb_random_seed={seed}", + "+UVM_TESTNAME={uvm_test}", + "+UVM_TEST_SEQ={uvm_test_seq}"] + + // Coverage related. + cov_db_dir: "{build_dir}/cov.vdb" + + // Individual test specific coverage data - this will be deleted if the test fails + // so that coverage from failiing tests is not included in the final report. + cov_db_test_dir_name: "{run_dir_name}.{seed}" + cov_db_test_dir: "{cov_db_dir}/snps/coverage/db/testdata/{cov_db_test_dir_name}" + + // Merging coverage. + // "cov_db_dirs" is a special variable that appends all build directories in use. + // It is constructed by the tool itself. 
+ cov_merge_dir: "{scratch_base_path}/cov_merge" + cov_merge_db_dir: "{cov_merge_dir}/merged.vdb" + cov_merge_cmd: "{job_prefix} urg" + cov_merge_opts: ["-full64", + "+urg+lic+wait", + "-nocheck", + "-noreport", + "-flex_merge drop", + "-group merge_across_scopes", + "-parallel", + "-parallel_split 20", + // Use cov_db_dirs var for dir args; append -dir in front of each + '''{eval_cmd} dirs=`echo {cov_db_dirs}`; dir_args=; \ + for d in $dirs; do dir_args="$dir_args -dir $d"; done; \ + echo $dir_args + ''', + "-dbname {cov_merge_db_dir}"] + + // Generate coverage reports in text as well as html. + cov_report_dir: "{scratch_base_path}/cov_report" + cov_report_cmd: "{job_prefix} urg" + cov_report_opts: ["-full64", + "+urg+lic+wait", + "-dir {cov_merge_db_dir}", + "-group instcov_for_score", + "-line nocasedef", + "-format both", + "-report {cov_report_dir}"] + cov_report_dashboard: "{cov_report_dir}/dashboard.txt" + + // Analyzing coverage - this is done by invoking --cov-analyze switch. It opens up the + // GUI for visual analysis. + cov_analyze_dir: "{scratch_base_path}/cov_analyze" + cov_analyze_cmd: "{job_prefix} verdi" + cov_analyze_opts: ["-cov", + "-covdir {cov_merge_db_dir}", + "-line nocasedef" + "-elfile {vcs_cov_excl_files}"] + + // Vars that need to exported to the env. 
+ exports: [ + VCS_ARCH_OVERRIDE: linux + VCS_LIC_EXPIRE_WARNING: 1 + ] + + // Defaults for VCS + cov_metrics: "line+cond+fsm+tgl+branch+assert" + vcs_cov_hier: "" + vcs_cov_assert_hier: "" + vcs_cov_excl_files: [] + + // pass and fail patterns + build_fail_patterns: ["^Error-.*$"] + run_fail_patterns: ["^Error-.*$"] // Null pointer error + + build_modes: [ + { + name: vcs_waves + is_sim_mode: 1 + build_opts: ["-debug_access+all"] + } + { + name: vcs_cov + is_sim_mode: 1 + build_opts: [// Enable the required cov metrics + "-cm {cov_metrics}", + // Set the coverage hierarchy + "{vcs_cov_hier}", + // Cover all continuous assignments + "-cm_line contassign", + // Dump toggle coverage on mdas, array of structs and on ports only + "-cm_tgl mda+structarr+portsonly", + // Ignore initial blocks for coverage + "-cm_report noinitial", + // Filter unreachable/statically constant blocks + "-cm_noconst", + // Don't count coverage that's coming from zero-time glitches + "-cm_glitch 0", + // Ignore warnings about not applying cm_glitch to path and FSM + "+warn=noVCM-OPTIGN", + // Coverage database output location + "-cm_dir {cov_db_dir}"] + + run_opts: [// Enable the required cov metrics + "-cm {cov_metrics}", + // Same directory as build + "-cm_dir {build_dir}/cov.vdb", + // Don't output cm.log which can be quite large + "-cm_log /dev/null", + // Provide a name to the coverage collected for this test + "-cm_name {cov_db_test_dir_name}", + // Don't dump all the coverage assertion attempts at the end of simulation + "-assert nopostproc"] + } + { + name: vcs_xprop + is_sim_mode: 1 + build_opts: ["-xprop={tool_srcs_dir}/xprop.cfg"] + } + { + name: vcs_profile + is_sim_mode: 1 + build_opts: ["-simprofile"] + run_opts: ["-simprofile {profile}"] + } + ] +} diff --git a/dv/uvm/data/vcs/vcs_fsdb.tcl b/dv/uvm/data/vcs/vcs_fsdb.tcl new file mode 100644 index 00000000..01ebab34 --- /dev/null +++ b/dv/uvm/data/vcs/vcs_fsdb.tcl @@ -0,0 +1,44 @@ +# Copyright lowRISC contributors. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +# TCL file invoked from VCS's simv at run-time using this: -ucli -do + +if {[info exists ::env(WAVES)]} { +# Use FSDB for dumping only if Verdi is set up + +# Syntax: fsdbDumpvars [depth] [instance] [option]* +############################################################################## +# Option Description +############################################################################## +# +mda Dumps memory and MDA signals in all scopes. +# +packedmda Dumps packed signals +# +struct Dumps structs +# +skip_cell_instance=mode Enables or disables cell dumping +# +strength Enables strength dumping +# +parameter Dumps parameters +# +power Dumps power-related signals +# +trace_process Dumps VHDL processes +# +no_functions Disables dumping of functions +# +sva Dumps assertions +# +Reg_Only Dumps only reg type signals +# +IO_Only Dumps only IO port signals +# +by_file= File to specify objects to add +# +all Dumps memories, MDA signals, structs, unions,power, and packed structs + + if {$::env(WAVES) == 1} { + if { [info exists ::env(VERDI_HOME)] } { + fsdbDumpfile $::env(DUMP_FILE) + fsdbDumpvars 0 $::env(TB_TOP) +all + fsdbDumpSVA 0 $::env(TB_TOP) + } else { + # Verdi is not set up, so use standard dumping format + set dump_file $::env(DUMP_FILE) + dump -file "${dump_file}" + dump -add { tb } -depth 0 -aggregates -scope "." + } + } +} + +run +quit diff --git a/dv/uvm/data/vcs/xprop.cfg b/dv/uvm/data/vcs/xprop.cfg new file mode 100644 index 00000000..552a1849 --- /dev/null +++ b/dv/uvm/data/vcs/xprop.cfg @@ -0,0 +1,4 @@ +merge = xmerge; + +// Turn on xprop for dut only +instance { tb.dut } { xpropOn }; diff --git a/dv/uvm/data/xcelium/xcelium.hjson b/dv/uvm/data/xcelium/xcelium.hjson new file mode 100644 index 00000000..7326ad06 --- /dev/null +++ b/dv/uvm/data/xcelium/xcelium.hjson @@ -0,0 +1,84 @@ +// Copyright lowRISC contributors. 
+// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + build_cmd: "{job_prefix} xrun" + run_cmd: "{job_prefix} xrun" + + // Indicate the tool specific helper sources - these are copied over to the + // {tool_srcs_dir} before running the simulation. + tool_srcs: ["{proj_root}/dv/uvm/data/xcelium/*"] + + build_opts: [" -elaborate -64bit -access +r -sv", + "-messages -errormax 50", + "-timescale 1ns/1ps", + "-f {sv_flist}", + "-uvmhome {UVM_HOME}", + "-xmlibdirname {build_dir}/xcelium.d"] + + run_opts: ["-input {tool_srcs_dir}/xcelium_{dump}.tcl", + "-64bit -xmlibdirname {build_dir}/xcelium.d -R", + "+SVSEED={seed}", + "+UVM_TESTNAME={uvm_test}", + "+UVM_TEST_SEQ={uvm_test_seq}"] + + // Coverage related. + // TODO: These options have to be filled in. + cov_db_dir: "" + + // Individual test specific coverage data - this will be deleted if the test fails + // so that coverage from failiing tests is not included in the final report. + cov_db_test_dir_name: "{run_dir_name}.{seed}" + cov_db_test_dir: "" + + // Merging coverage. + // "cov_db_dirs" is a special variable that appends all build directories in use. + // It is constructed by the tool itself. + cov_merge_dir: "{scratch_base_path}/cov_merge" + cov_merge_db_dir: "" + cov_merge_cmd: "" + cov_merge_opts: [] + + // Generate covreage reports in text as well as html. + cov_report_dir: "{scratch_base_path}/cov_report" + cov_report_cmd: "" + cov_report_opts: [] + cov_report_dashboard: "" + + // Analyzing coverage - this is done by invoking --cov-analyze switch. It opens up the + // GUI for visual analysis. 
+ cov_analyze_dir: "{scratch_base_path}/cov_analyze" + cov_analyze_cmd: "" + cov_analyze_opts: [] + + // pass and fail patterns + build_fail_patterns: ["\\*E.*$"] + run_fail_patterns: ["\\*E.*$"] // Null pointer error + + build_modes: [ + { + name: xcelium_waves + is_sim_mode: 1 + } + // TODO support coverage for xcelium + { + name: xcelium_cov + is_sim_mode: 1 + build_opts: [] + run_opts: [] + } + // TODO support profile for xcelium + { + name: xcelium_profile + is_sim_mode: 1 + build_opts: [] + run_opts: [] + } + { + name: xcelium_xprop + is_sim_mode: 1 + # -xverbose << add to see which modules does not have xprop enabled + build_opts: ["-xprop F"] + } + ] +} diff --git a/dv/uvm/icache/data/ibex_icache_testplan.hjson b/dv/uvm/icache/data/ibex_icache_testplan.hjson new file mode 100644 index 00000000..11a8a16f --- /dev/null +++ b/dv/uvm/icache/data/ibex_icache_testplan.hjson @@ -0,0 +1,30 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + name: "ibex_icache" + + entries: [ + { + name: sanity + desc: '''**Goal**: Basic sanity test acessing a major datapath in IBEX_ICACHE. + + **Stimulus**: Describe the stimulus procedure. + + **Checks**": Describe the self-check procedure. + - add bullets as needed + - second bullet
+ describe second bullet + + Start a new paragraph.''' + milestone: V1 + tests: ["ibex_icache_sanity"] + } + { + name: feature1 + desc: '''Add more test entries here like above.''' + milestone: V1 + tests: [] + } + ] +} diff --git a/dv/uvm/icache/doc/ibex_icache_dv_plan.md b/dv/uvm/icache/doc/ibex_icache_dv_plan.md new file mode 100644 index 00000000..851d1757 --- /dev/null +++ b/dv/uvm/icache/doc/ibex_icache_dv_plan.md @@ -0,0 +1,99 @@ +--- +title: "IBEX_ICACHE DV Plan" +--- + +## Goals +* **DV** + * Verify all IBEX_ICACHE IP features by running dynamic simulations with a SV/UVM based testbench + * Develop and run all tests based on the [testplan](#testplan) below towards closing code and functional coverage on the IP and all of its sub-modules +* **FPV** + +## Current status + +* [Design & verification stage]({{< relref "doc/project/hw_dashboard" >}}) + * [HW development stages]({{< relref "doc/project/hw_stages" >}}) +* [Simulation results](https://reports.opentitan.org/hw/ip/ibex_icache/dv/latest/results.html) + +## Design features +For detailed information on IBEX_ICACHE design features, please see the [IBEX_ICACHE technical specification]({{< relref "doc/icache.rst" >}}). + +## Testbench architecture +IBEX_ICACHE testbench has been constructed based on the [DV_LIB testbench architecture]({{< relref "vendor/lowrisc_ip/dv_lib/" >}}). + +### Block diagram +![Block diagram](tb.svg) + +### Top level testbench +Top level testbench is located at `dv/uvm/icache/dv/tb/tb.sv`. It instantiates the IBEX_ICACHE DUT module `rtl/ibex_icache.sv`. 
+In addition, it instantiates the following interfaces, connects them to the DUT and sets their handle into `uvm_config_db`: +* [Clock and reset interface]({{< relref "vendor/lowrisc_ip/common_ifs" >}}) + +* IBEX_ICACHE IOs + +### Common DV utility components +The following utilities provide generic helper tasks and functions to perform activities that are common across the project: +* [dv_utils_pkg]({{< relref "vendor/lowrisc_ip/dv_utils/README.md" >}}) + +### Compile-time configurations +[list compile time configurations, if any and what are they used for] + +### Global types & methods +All common types and methods defined at the package level can be found in +`ibex_icache_env_pkg`. Some of them in use are: +```systemverilog +[list a few parameters, types & methods; no need to mention all] +``` + +### IBEX_ICACHE Agent +[Describe here or add link to its README] +### IBEX_MEM_INTF_SLAVE Agent +[Describe here or add link to its README] + +### UVC/agent 1 +[Describe here or add link to its README] + +### UVC/agent 2 +[Describe here or add link to its README] + + +### Reference models +[Describe reference models in use if applicable, example: SHA256/HMAC] + +### Stimulus strategy +#### Test sequences +All test sequences reside in `dv/uvm/icache/dv/env/seq_lib`. +The `ibex_icache_base_vseq` virtual sequence is extended from `cip_base_vseq` and serves as a starting point. +All test sequences are extended from `ibex_icache_base_vseq`. +It provides commonly used handles, variables, functions and tasks that the test sequences can simple use / call. +Some of the most commonly used tasks / functions are as follows: +* task 1: +* task 2: + +#### Functional coverage +To ensure high quality constrained random stimulus, it is necessary to develop a functional coverage model. 
+The following covergroups have been developed to prove that the test intent has been adequately met: +* cg1: +* cg2: + +### Self-checking strategy +#### Scoreboard +The `ibex_icache_scoreboard` is primarily used for end to end checking. +It creates the following analysis ports to retrieve the data monitored by corresponding interface agents: +* analysis port1: +* analysis port2: + + +#### Assertions + + +## Building and running tests +We are using our in-house developed [regression tool]({{< relref "vendor/lowrisc_ip/dvsim" >}}) for building and running our tests and regressions. +Please take a look at the link for detailed information on the usage, capabilities, features and known issues. +Here's how to run a basic sanity test: +```console +$ cd hw/ip/ibex_icache/dv +$ make TEST_NAME=ibex_icache_sanity +``` + +## Testplan +{{}} diff --git a/dv/uvm/icache/doc/tb.svg b/dv/uvm/icache/doc/tb.svg new file mode 100644 index 00000000..233330de --- /dev/null +++ b/dv/uvm/icache/doc/tb.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/dv/uvm/icache/dv/env/ibex_icache_env.core b/dv/uvm/icache/dv/env/ibex_icache_env.core new file mode 100644 index 00000000..885b2f5e --- /dev/null +++ b/dv/uvm/icache/dv/env/ibex_icache_env.core @@ -0,0 +1,29 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:ibex_icache_env:0.1" +description: "IBEX_ICACHE DV UVM environment" +filesets: + files_dv: + depend: + - lowrisc:dv:dv_lib + - lowrisc:dv:ibex_icache_agent + - lowrisc:dv:ibex_mem_intf_agent + files: + - ibex_icache_env_pkg.sv + - ibex_icache_env_cfg.sv: {is_include_file: true} + - ibex_icache_env_cov.sv: {is_include_file: true} + - ibex_icache_virtual_sequencer.sv: {is_include_file: true} + - ibex_icache_scoreboard.sv: {is_include_file: true} + - ibex_icache_env.sv: {is_include_file: true} + - seq_lib/ibex_icache_vseq_list.sv: {is_include_file: true} + - seq_lib/ibex_icache_base_vseq.sv: {is_include_file: true} + - seq_lib/ibex_icache_common_vseq.sv: {is_include_file: true} + - seq_lib/ibex_icache_sanity_vseq.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/dv/uvm/icache/dv/env/ibex_icache_env.sv b/dv/uvm/icache/dv/env/ibex_icache_env.sv new file mode 100644 index 00000000..1252116f --- /dev/null +++ b/dv/uvm/icache/dv/env/ibex_icache_env.sv @@ -0,0 +1,41 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_env extends dv_base_env #( + .CFG_T (ibex_icache_env_cfg), + .COV_T (ibex_icache_env_cov), + .VIRTUAL_SEQUENCER_T(ibex_icache_virtual_sequencer), + .SCOREBOARD_T (ibex_icache_scoreboard) + ); + `uvm_component_utils(ibex_icache_env) + + ibex_icache_agent m_ibex_icache_agent; + ibex_mem_intf_slave_agent m_ibex_mem_intf_slave_agent; + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + // create components + m_ibex_icache_agent = ibex_icache_agent::type_id::create("m_ibex_icache_agent", this); + uvm_config_db#(ibex_icache_agent_cfg)::set(this, "m_ibex_icache_agent*", "cfg", cfg.m_ibex_icache_agent_cfg); + // create components + m_ibex_mem_intf_slave_agent = ibex_mem_intf_slave_agent::type_id::create("m_ibex_mem_intf_slave_agent", this); + endfunction + + function void connect_phase(uvm_phase phase); + super.connect_phase(phase); + if (cfg.en_scb) begin + m_ibex_icache_agent.monitor.analysis_port.connect(scoreboard.ibex_icache_fifo.analysis_export); + m_ibex_mem_intf_slave_agent.monitor.addr_ph_port.connect(scoreboard.ibex_mem_intf_slave_fifo.analysis_export); + end + if (cfg.is_active && cfg.m_ibex_icache_agent_cfg.is_active) begin + virtual_sequencer.ibex_icache_sequencer_h = m_ibex_icache_agent.sequencer; + end + if (cfg.is_active && m_ibex_mem_intf_slave_agent.get_is_active()) begin + virtual_sequencer.ibex_mem_intf_slave_sequencer_h = m_ibex_mem_intf_slave_agent.sequencer; + end + endfunction + +endclass diff --git a/dv/uvm/icache/dv/env/ibex_icache_env_cfg.sv b/dv/uvm/icache/dv/env/ibex_icache_env_cfg.sv new file mode 100644 index 00000000..c8b8dd1b --- /dev/null +++ b/dv/uvm/icache/dv/env/ibex_icache_env_cfg.sv @@ -0,0 +1,23 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_env_cfg extends dv_base_env_cfg; + + // ext component cfgs + rand ibex_icache_agent_cfg m_ibex_icache_agent_cfg; + + `uvm_object_utils_begin(ibex_icache_env_cfg) + `uvm_field_object(m_ibex_icache_agent_cfg, UVM_DEFAULT) + `uvm_object_utils_end + + `uvm_object_new + + + virtual function void initialize(bit [TL_AW-1:0] csr_base_addr = '1); + // create ibex_icache agent config obj + m_ibex_icache_agent_cfg = ibex_icache_agent_cfg::type_id::create("m_ibex_icache_agent_cfg"); + // create ibex_mem_intf_slave agent config obj + endfunction + +endclass diff --git a/dv/uvm/icache/dv/env/ibex_icache_env_cov.sv b/dv/uvm/icache/dv/env/ibex_icache_env_cov.sv new file mode 100644 index 00000000..1ba11079 --- /dev/null +++ b/dv/uvm/icache/dv/env/ibex_icache_env_cov.sv @@ -0,0 +1,30 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +/** + * Covergoups that are dependent on run-time parameters that may be available + * only in build_phase can be defined here + * Covergroups may also be wrapped inside helper classes if needed. 
+ */ + +class ibex_icache_env_cov extends dv_base_env_cov #(.CFG_T(ibex_icache_env_cfg)); + `uvm_component_utils(ibex_icache_env_cov) + + // the base class provides the following handles for use: + // ibex_icache_env_cfg: cfg + + // covergroups + // [add covergroups here] + + function new(string name, uvm_component parent); + super.new(name, parent); + // [instantiate covergroups here] + endfunction : new + + virtual function void build_phase(uvm_phase phase); + super.build_phase(phase); + // [or instantiate covergroups here] + endfunction + +endclass diff --git a/dv/uvm/icache/dv/env/ibex_icache_env_pkg.sv b/dv/uvm/icache/dv/env/ibex_icache_env_pkg.sv new file mode 100644 index 00000000..eb941be7 --- /dev/null +++ b/dv/uvm/icache/dv/env/ibex_icache_env_pkg.sv @@ -0,0 +1,33 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +package ibex_icache_env_pkg; + // dep packages + import uvm_pkg::*; + import top_pkg::*; + import dv_utils_pkg::*; + import ibex_icache_agent_pkg::*; + import ibex_mem_intf_agent_pkg::*; + import dv_lib_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // parameters + + // types + typedef dv_base_reg_block ibex_icache_reg_block; + + // functions + + // package sources + `include "ibex_icache_env_cfg.sv" + `include "ibex_icache_env_cov.sv" + `include "ibex_icache_virtual_sequencer.sv" + `include "ibex_icache_scoreboard.sv" + `include "ibex_icache_env.sv" + `include "ibex_icache_vseq_list.sv" + +endpackage diff --git a/dv/uvm/icache/dv/env/ibex_icache_scoreboard.sv b/dv/uvm/icache/dv/env/ibex_icache_scoreboard.sv new file mode 100644 index 00000000..ac3a2a0f --- /dev/null +++ b/dv/uvm/icache/dv/env/ibex_icache_scoreboard.sv @@ -0,0 +1,67 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_scoreboard extends dv_base_scoreboard #( + .CFG_T(ibex_icache_env_cfg), + .COV_T(ibex_icache_env_cov) + ); + `uvm_component_utils(ibex_icache_scoreboard) + + // local variables + + // TLM agent fifos + uvm_tlm_analysis_fifo #(ibex_icache_item) ibex_icache_fifo; + uvm_tlm_analysis_fifo #(ibex_mem_intf_seq_item) ibex_mem_intf_slave_fifo; + + // local queues to hold incoming packets pending comparison + ibex_icache_item ibex_icache_q[$]; + ibex_mem_intf_seq_item ibex_mem_intf_slave_q[$]; + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + ibex_icache_fifo = new("ibex_icache_fifo", this); + ibex_mem_intf_slave_fifo = new("ibex_mem_intf_slave_fifo", this); + endfunction + + function void connect_phase(uvm_phase phase); + super.connect_phase(phase); + endfunction + + task run_phase(uvm_phase phase); + super.run_phase(phase); + fork + process_ibex_icache_fifo(); + process_ibex_mem_intf_slave_fifo(); + join_none + endtask + + virtual task process_ibex_icache_fifo(); + ibex_icache_item item; + forever begin + ibex_icache_fifo.get(item); + `uvm_info(`gfn, $sformatf("received ibex_icache item:\n%0s", item.sprint()), UVM_HIGH) + end + endtask + + virtual task process_ibex_mem_intf_slave_fifo(); + ibex_mem_intf_seq_item item; + forever begin + ibex_mem_intf_slave_fifo.get(item); + `uvm_info(`gfn, $sformatf("received ibex_mem_intf_seq item:\n%0s", item.sprint()), UVM_HIGH) + end + endtask + + virtual function void reset(string kind = "HARD"); + super.reset(kind); + // reset local fifos queues and variables + endfunction + + function void check_phase(uvm_phase phase); + super.check_phase(phase); + // post test checks - ensure that all local fifos and queues are empty + endfunction + +endclass diff --git a/dv/uvm/icache/dv/env/ibex_icache_virtual_sequencer.sv b/dv/uvm/icache/dv/env/ibex_icache_virtual_sequencer.sv new file mode 100644 index 00000000..351398aa --- /dev/null 
+++ b/dv/uvm/icache/dv/env/ibex_icache_virtual_sequencer.sv @@ -0,0 +1,16 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_virtual_sequencer extends dv_base_virtual_sequencer #( + .CFG_T(ibex_icache_env_cfg), + .COV_T(ibex_icache_env_cov) + ); + `uvm_component_utils(ibex_icache_virtual_sequencer) + + ibex_icache_sequencer ibex_icache_sequencer_h; + ibex_mem_intf_slave_sequencer ibex_mem_intf_slave_sequencer_h; + + `uvm_component_new + +endclass diff --git a/dv/uvm/icache/dv/env/seq_lib/ibex_icache_base_vseq.sv b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_base_vseq.sv new file mode 100644 index 00000000..e827e410 --- /dev/null +++ b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_base_vseq.sv @@ -0,0 +1,32 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_base_vseq extends dv_base_vseq #( + .CFG_T (ibex_icache_env_cfg), + .COV_T (ibex_icache_env_cov), + .VIRTUAL_SEQUENCER_T (ibex_icache_virtual_sequencer) + ); + `uvm_object_utils(ibex_icache_base_vseq) + + // various knobs to enable certain routines + bit do_ibex_icache_init = 1'b1; + + `uvm_object_new + + virtual task dut_init(string reset_kind = "HARD"); + super.dut_init(); + if (do_ibex_icache_init) ibex_icache_init(); + endtask + + virtual task dut_shutdown(); + // check for pending ibex_icache operations and wait for them to complete + // TODO + endtask + + // setup basic ibex_icache features + virtual task ibex_icache_init(); + `uvm_error(`gfn, "FIXME") + endtask + +endclass : ibex_icache_base_vseq diff --git a/dv/uvm/icache/dv/env/seq_lib/ibex_icache_common_vseq.sv b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_common_vseq.sv new file mode 100644 index 00000000..030f0a94 --- /dev/null +++ b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_common_vseq.sv @@ -0,0 +1,18 @@ +// 
Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_common_vseq extends ibex_icache_base_vseq; + `uvm_object_utils(ibex_icache_common_vseq) + + constraint num_trans_c { + num_trans inside {[1:2]}; + } + `uvm_object_new + + virtual task body(); + // TODO: implement the body of the common virtual sequence + endtask : body + + +endclass diff --git a/dv/uvm/icache/dv/env/seq_lib/ibex_icache_sanity_vseq.sv b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_sanity_vseq.sv new file mode 100644 index 00000000..0697d491 --- /dev/null +++ b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_sanity_vseq.sv @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// basic sanity test vseq +class ibex_icache_sanity_vseq extends ibex_icache_base_vseq; + `uvm_object_utils(ibex_icache_sanity_vseq) + + `uvm_object_new + + task body(); + `uvm_error(`gfn, "FIXME") + endtask : body + +endclass : ibex_icache_sanity_vseq diff --git a/dv/uvm/icache/dv/env/seq_lib/ibex_icache_vseq_list.sv b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_vseq_list.sv new file mode 100644 index 00000000..4a8ece41 --- /dev/null +++ b/dv/uvm/icache/dv/env/seq_lib/ibex_icache_vseq_list.sv @@ -0,0 +1,7 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +`include "ibex_icache_base_vseq.sv" +`include "ibex_icache_sanity_vseq.sv" +`include "ibex_icache_common_vseq.sv" diff --git a/dv/uvm/icache/dv/ibex_icache_agent/README.md b/dv/uvm/icache/dv/ibex_icache_agent/README.md new file mode 100644 index 00000000..4ee1f428 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/README.md @@ -0,0 +1,3 @@ +# IBEX_ICACHE UVM Agent + +IBEX_ICACHE UVM Agent is extended from DV library agent classes. 
diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.core b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.core new file mode 100644 index 00000000..5e7d41d2 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.core @@ -0,0 +1,28 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:ibex_icache_agent:0.1" +description: "IBEX_ICACHE DV UVM agent" +filesets: + files_dv: + depend: + - lowrisc:dv:dv_utils + - lowrisc:dv:dv_lib + files: + - ibex_icache_if.sv + - ibex_icache_agent_pkg.sv + - ibex_icache_item.sv: {is_include_file: true} + - ibex_icache_agent_cfg.sv: {is_include_file: true} + - ibex_icache_agent_cov.sv: {is_include_file: true} + - ibex_icache_driver.sv: {is_include_file: true} + - ibex_icache_monitor.sv: {is_include_file: true} + - ibex_icache_agent.sv: {is_include_file: true} + - seq_lib/ibex_icache_base_seq.sv: {is_include_file: true} + - seq_lib/ibex_icache_seq_list.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.sv new file mode 100644 index 00000000..832ce623 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent.sv @@ -0,0 +1,25 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_agent extends dv_base_agent #( + .CFG_T (ibex_icache_agent_cfg), + .DRIVER_T (ibex_icache_driver), + .SEQUENCER_T (ibex_icache_sequencer), + .MONITOR_T (ibex_icache_monitor), + .COV_T (ibex_icache_agent_cov) +); + + `uvm_component_utils(ibex_icache_agent) + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + // get ibex_icache_if handle + if (!uvm_config_db#(virtual ibex_icache_if)::get(this, "", "vif", cfg.vif)) begin + `uvm_fatal(`gfn, "failed to get ibex_icache_if handle from uvm_config_db") + end + endfunction + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cfg.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cfg.sv new file mode 100644 index 00000000..d5b5f122 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cfg.sv @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_agent_cfg extends dv_base_agent_cfg; + + // interface handle used by driver, monitor & the sequencer, via cfg handle + virtual ibex_icache_if vif; + + `uvm_object_utils_begin(ibex_icache_agent_cfg) + `uvm_object_utils_end + + `uvm_object_new + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cov.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cov.sv new file mode 100644 index 00000000..4176a0d7 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_cov.sv @@ -0,0 +1,18 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_agent_cov extends dv_base_agent_cov #(ibex_icache_agent_cfg); + `uvm_component_utils(ibex_icache_agent_cov) + + // the base class provides the following handles for use: + // ibex_icache_agent_cfg: cfg + + // covergroups + + function new(string name, uvm_component parent); + super.new(name, parent); + // instantiate all covergroups here + endfunction : new + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_pkg.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_pkg.sv new file mode 100644 index 00000000..d1d70ac2 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_agent_pkg.sv @@ -0,0 +1,37 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +package ibex_icache_agent_pkg; + // dep packages + import uvm_pkg::*; + import dv_utils_pkg::*; + import dv_lib_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // parameters + + // local types + // forward declare classes to allow typedefs below + typedef class ibex_icache_item; + typedef class ibex_icache_agent_cfg; + + // reuse dv_base_sequencer as is with the right parameter set + typedef dv_base_sequencer #(.ITEM_T(ibex_icache_item), + .CFG_T (ibex_icache_agent_cfg)) ibex_icache_sequencer; + + // functions + + // package sources + `include "ibex_icache_item.sv" + `include "ibex_icache_agent_cfg.sv" + `include "ibex_icache_agent_cov.sv" + `include "ibex_icache_driver.sv" + `include "ibex_icache_monitor.sv" + `include "ibex_icache_agent.sv" + `include "ibex_icache_seq_list.sv" + +endpackage: ibex_icache_agent_pkg diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_driver.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_driver.sv new file mode 100644 index 00000000..98d8d2b8 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_driver.sv @@ -0,
+1,37 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_driver extends dv_base_driver #(ibex_icache_item, ibex_icache_agent_cfg); + `uvm_component_utils(ibex_icache_driver) + + // the base class provides the following handles for use: + // ibex_icache_agent_cfg: cfg + + `uvm_component_new + + virtual task run_phase(uvm_phase phase); + // base class forks off reset_signals() and get_and_drive() tasks + super.run_phase(phase); + endtask + + // reset signals + virtual task reset_signals(); + endtask + + // drive trans received from sequencer + virtual task get_and_drive(); + forever begin + seq_item_port.get_next_item(req); + $cast(rsp, req.clone()); + rsp.set_id_info(req); + `uvm_info(`gfn, $sformatf("rcvd item:\n%0s", req.sprint()), UVM_HIGH) + // TODO: do the driving part + // + // send rsp back to seq + `uvm_info(`gfn, "item sent", UVM_HIGH) + seq_item_port.item_done(rsp); + end + endtask + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_if.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_if.sv new file mode 100644 index 00000000..0c9ce9c4 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_if.sv @@ -0,0 +1,11 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +interface ibex_icache_if (); + + // interface pins + + // debug signals + +endinterface diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_item.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_item.sv new file mode 100644 index 00000000..8e325f9a --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_item.sv @@ -0,0 +1,14 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_item extends uvm_sequence_item; + + // random variables + + `uvm_object_utils_begin(ibex_icache_item) + `uvm_object_utils_end + + `uvm_object_new + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_monitor.sv b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_monitor.sv new file mode 100644 index 00000000..19aaf87f --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/ibex_icache_monitor.sv @@ -0,0 +1,43 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_monitor extends dv_base_monitor #( + .ITEM_T (ibex_icache_item), + .CFG_T (ibex_icache_agent_cfg), + .COV_T (ibex_icache_agent_cov) + ); + `uvm_component_utils(ibex_icache_monitor) + + // the base class provides the following handles for use: + // ibex_icache_agent_cfg: cfg + // ibex_icache_agent_cov: cov + // uvm_analysis_port #(ibex_icache_item): analysis_port + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + endfunction + + task run_phase(uvm_phase phase); + super.run_phase(phase); + endtask + + // collect transactions forever - already forked in dv_base_monitor::run_phase + virtual protected task collect_trans(uvm_phase phase); + forever begin + // TODO: detect event + + // TODO: sample the interface + + // TODO: sample the covergroups + + // TODO: write trans to analysis_port + + // TODO: remove the line below: it is added to prevent zero delay loop in template code + #1us; + end + endtask + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_base_seq.sv b/dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_base_seq.sv new file mode 100644 index 00000000..272b32bf --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_base_seq.sv @@ -0,0 +1,18 @@ +// Copyright lowRISC contributors.
+// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_base_seq extends dv_base_seq #( + .REQ (ibex_icache_item), + .CFG_T (ibex_icache_agent_cfg), + .SEQUENCER_T (ibex_icache_sequencer) + ); + `uvm_object_utils(ibex_icache_base_seq) + + `uvm_object_new + + virtual task body(); + `uvm_fatal(`gtn, "Need to override this when you extend from this class!") + endtask + +endclass diff --git a/dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_seq_list.sv b/dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_seq_list.sv new file mode 100644 index 00000000..368e630b --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_agent/seq_lib/ibex_icache_seq_list.sv @@ -0,0 +1,5 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +`include "ibex_icache_base_seq.sv" diff --git a/dv/uvm/icache/dv/ibex_icache_sim.core b/dv/uvm/icache/dv/ibex_icache_sim.core new file mode 100644 index 00000000..1992b80b --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_sim.core @@ -0,0 +1,25 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:ibex_icache_sim:0.1" +description: "IBEX_ICACHE DV sim target" +filesets: + files_rtl: + depend: + - lowrisc:ibex:ibex_icache:0.1 + + files_dv: + depend: + - lowrisc:dv:ibex_icache_test + files: + - tb/tb.sv + file_type: systemVerilogSource + +targets: + sim: + filesets: + - files_rtl + - files_dv + toplevel: tb + default_tool: vcs diff --git a/dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson b/dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson new file mode 100644 index 00000000..b2f94b49 --- /dev/null +++ b/dv/uvm/icache/dv/ibex_icache_sim_cfg.hjson @@ -0,0 +1,58 @@ +// Copyright lowRISC contributors. 
+// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + // Name of the sim cfg - typically same as the name of the DUT. + name: ibex_icache + + // Top level dut name (sv module). + dut: ibex_icache + + // Top level testbench name (sv module). + tb: tb + + // Simulator used to sign off this block + tool: vcs + + // Fusesoc core file used for building the file list. + fusesoc_core: lowrisc:dv:ibex_icache_sim:0.1 + + // Testplan hjson file. + testplan: "{proj_root}/dv/uvm/icache/data/ibex_icache_testplan.hjson" + + + // Import additional common sim cfg files. + // TODO: remove imported cfgs that do not apply. + import_cfgs: [// Project wide common sim cfg file + "{proj_root}/dv/uvm/data/common_sim_cfg.hjson"] + + // Default iterations for all tests - each test entry can override this. + reseed: 50 + + gen_ral_pkg_cmd: "" + gen_ral_pkg_dir: "" + gen_ral_pkg_opts: [] + + // Default UVM test and seq class name. + uvm_test: ibex_icache_base_test + uvm_test_seq: ibex_icache_base_vseq + + // List of test specifications. + tests: [ + { + name: ibex_icache_sanity + uvm_test_seq: ibex_icache_sanity_vseq + } + + // TODO: add more tests here + ] + + // List of regressions. + regressions: [ + { + name: sanity + tests: ["ibex_icache_sanity"] + } + ] +} + diff --git a/dv/uvm/icache/dv/tb/tb.sv b/dv/uvm/icache/dv/tb/tb.sv new file mode 100644 index 00000000..253471ee --- /dev/null +++ b/dv/uvm/icache/dv/tb/tb.sv @@ -0,0 +1,41 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +module tb; + // dep packages + import uvm_pkg::*; + import dv_utils_pkg::*; + import ibex_icache_env_pkg::*; + import ibex_icache_test_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + wire clk, rst_n; + + // interfaces + clk_rst_if clk_rst_if(.clk(clk), .rst_n(rst_n)); + ibex_icache_if ibex_icache_if(); + ibex_mem_intf ibex_mem_intf(); + + // dut + ibex_icache dut ( + .clk_i (clk ), + .rst_ni (rst_n ) + + // TODO: add remaining IOs and hook them + ); + + initial begin + // drive clk and rst_n from clk_if + clk_rst_if.set_active(); + uvm_config_db#(virtual clk_rst_if)::set(null, "*.env", "clk_rst_vif", clk_rst_if); + uvm_config_db#(virtual ibex_icache_if)::set(null, "*.env.m_ibex_icache_agent*", "vif", ibex_icache_if); + uvm_config_db#(virtual ibex_mem_intf)::set(null, "*.env.m_ibex_mem_intf_slave_agent*", "vif", ibex_mem_intf); + $timeformat(-12, 0, " ps", 12); + run_test(); + end + +endmodule diff --git a/dv/uvm/icache/dv/tests/ibex_icache_base_test.sv b/dv/uvm/icache/dv/tests/ibex_icache_base_test.sv new file mode 100644 index 00000000..79396144 --- /dev/null +++ b/dv/uvm/icache/dv/tests/ibex_icache_base_test.sv @@ -0,0 +1,26 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ibex_icache_base_test extends dv_base_test #( + .CFG_T(ibex_icache_env_cfg), + .ENV_T(ibex_icache_env) + ); + + `uvm_component_utils(ibex_icache_base_test) + `uvm_component_new + + // the base class dv_base_test creates the following instances: + // ibex_icache_env_cfg: cfg + // ibex_icache_env: env + + virtual function void build_phase(uvm_phase phase); + super.build_phase(phase); + cfg.has_ral = 1'b0; + endfunction + + // the base class also looks up UVM_TEST_SEQ plusarg to create and run that seq in + // the run_phase; as such, nothing more needs to be done + +endclass : ibex_icache_base_test + diff --git a/dv/uvm/icache/dv/tests/ibex_icache_test.core b/dv/uvm/icache/dv/tests/ibex_icache_test.core new file mode 100644 index 00000000..a1a68a43 --- /dev/null +++ b/dv/uvm/icache/dv/tests/ibex_icache_test.core @@ -0,0 +1,19 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:ibex_icache_test:0.1" +description: "IBEX_ICACHE DV UVM test" +filesets: + files_dv: + depend: + - lowrisc:dv:ibex_icache_env + files: + - ibex_icache_test_pkg.sv + - ibex_icache_base_test.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/dv/uvm/icache/dv/tests/ibex_icache_test_pkg.sv b/dv/uvm/icache/dv/tests/ibex_icache_test_pkg.sv new file mode 100644 index 00000000..c00229ad --- /dev/null +++ b/dv/uvm/icache/dv/tests/ibex_icache_test_pkg.sv @@ -0,0 +1,22 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +package ibex_icache_test_pkg; + // dep packages + import uvm_pkg::*; + import dv_lib_pkg::*; + import ibex_icache_env_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // local types + + // functions + + // package sources + `include "ibex_icache_base_test.sv" + +endpackage diff --git a/dv/uvm/icache/dv/top_pkg.core b/dv/uvm/icache/dv/top_pkg.core new file mode 100644 index 00000000..b9aacee4 --- /dev/null +++ b/dv/uvm/icache/dv/top_pkg.core @@ -0,0 +1,20 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 + +# XXX: This name is currently required as global identifier until we have +# support for "interfaces" or a similar concept. +# Tracked in https://github.com/olofk/fusesoc/issues/235 +name: "lowrisc:constants:top_pkg" +description: "Toplevel-wide constants needed for dv_utils" +filesets: + files_rtl: + files: + - top_pkg.sv + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_rtl diff --git a/dv/uvm/icache/dv/top_pkg.sv b/dv/uvm/icache/dv/top_pkg.sv new file mode 100644 index 00000000..01f62462 --- /dev/null +++ b/dv/uvm/icache/dv/top_pkg.sv @@ -0,0 +1,25 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// + +package top_pkg; + +localparam TL_AW=32; +localparam TL_DW=32; // = TL_DBW * 8; TL_DBW must be a power-of-two +localparam TL_AIW=8; // a_source, d_source +localparam TL_DIW=1; // d_sink +localparam TL_DUW=16; // d_user +localparam TL_DBW=(TL_DW>>3); +localparam TL_SZW=$clog2($clog2(TL_DBW)+1); +localparam FLASH_BANKS=2; +localparam FLASH_PAGES_PER_BANK=256; +localparam FLASH_WORDS_PER_PAGE=256; +localparam FLASH_BYTES_PER_WORD=4; +localparam FLASH_BKW = $clog2(FLASH_BANKS); +localparam FLASH_PGW = $clog2(FLASH_PAGES_PER_BANK); +localparam FLASH_WDW = $clog2(FLASH_WORDS_PER_PAGE); +localparam FLASH_AW = FLASH_BKW + FLASH_PGW + FLASH_WDW; +localparam FLASH_DW = FLASH_BYTES_PER_WORD * 8; + +endpackage diff --git a/ibex_core.core b/ibex_core.core index ef6ab83b..55f008a3 100644 --- a/ibex_core.core +++ b/ibex_core.core @@ -10,6 +10,7 @@ filesets: depend: - lowrisc:prim:assert - lowrisc:ibex:ibex_pkg + - lowrisc:ibex:ibex_icache files: - rtl/ibex_alu.sv - rtl/ibex_compressed_decoder.sv @@ -19,7 +20,6 @@ filesets: - rtl/ibex_decoder.sv - rtl/ibex_ex_block.sv - rtl/ibex_fetch_fifo.sv - - rtl/ibex_icache.sv - rtl/ibex_id_stage.sv - rtl/ibex_if_stage.sv - rtl/ibex_load_store_unit.sv diff --git a/ibex_icache.core b/ibex_icache.core new file mode 100644 index 00000000..0ef0f338 --- /dev/null +++ b/ibex_icache.core @@ -0,0 +1,20 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:ibex:ibex_icache:0.1" +description: "IBEX_ICACHE DV sim target" +filesets: + files_rtl: + depend: + - lowrisc:ibex:sim_shared + files: + - rtl/ibex_icache.sv + file_type: systemVerilogSource + +targets: + default: &default_target + filesets: + - files_rtl + toplevel: ibex_icache + default_tool: vcs diff --git a/vendor/lowrisc_common_ifs.lock.hjson b/vendor/lowrisc_common_ifs.lock.hjson new file mode 100644 index 00000000..c57dd374 --- /dev/null +++ b/vendor/lowrisc_common_ifs.lock.hjson @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// This file is generated by the util/vendor script. Please do not modify it +// manually. + +{ + upstream: + { + url: https://github.com/lowRISC/opentitan + rev: 0d7f7ac755d4e00811257027dd814edb2afca050 + only_subdir: hw/dv/sv/common_ifs + } +} diff --git a/vendor/lowrisc_common_ifs.vendor.hjson b/vendor/lowrisc_common_ifs.vendor.hjson new file mode 100644 index 00000000..647e5212 --- /dev/null +++ b/vendor/lowrisc_common_ifs.vendor.hjson @@ -0,0 +1,13 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + name: "common_ifs", + target_dir: "lowrisc_ip/common_ifs", + + upstream: { + url: "https://github.com/lowRISC/opentitan" + rev: "master" + only_subdir: "hw/dv/sv/common_ifs" + } +} diff --git a/vendor/lowrisc_csr_utils.lock.hjson b/vendor/lowrisc_csr_utils.lock.hjson new file mode 100644 index 00000000..faf766f1 --- /dev/null +++ b/vendor/lowrisc_csr_utils.lock.hjson @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// This file is generated by the util/vendor script. Please do not modify it +// manually. 
+ +{ + upstream: + { + url: https://github.com/lowRISC/opentitan + rev: 0d7f7ac755d4e00811257027dd814edb2afca050 + only_subdir: hw/dv/sv/csr_utils + } +} diff --git a/vendor/lowrisc_csr_utils.vendor.hjson b/vendor/lowrisc_csr_utils.vendor.hjson new file mode 100644 index 00000000..015c0738 --- /dev/null +++ b/vendor/lowrisc_csr_utils.vendor.hjson @@ -0,0 +1,13 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + name: "csr_utils", + target_dir: "lowrisc_ip/csr_utils", + + upstream: { + url: "https://github.com/lowRISC/opentitan" + rev: "master" + only_subdir: "hw/dv/sv/csr_utils" + } +} diff --git a/vendor/lowrisc_dv_lib.lock.hjson b/vendor/lowrisc_dv_lib.lock.hjson new file mode 100644 index 00000000..b8f4afdb --- /dev/null +++ b/vendor/lowrisc_dv_lib.lock.hjson @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// This file is generated by the util/vendor script. Please do not modify it +// manually. + +{ + upstream: + { + url: https://github.com/lowRISC/opentitan + rev: 0d7f7ac755d4e00811257027dd814edb2afca050 + only_subdir: hw/dv/sv/dv_lib + } +} diff --git a/vendor/lowrisc_dv_lib.vendor.hjson b/vendor/lowrisc_dv_lib.vendor.hjson new file mode 100644 index 00000000..adafe411 --- /dev/null +++ b/vendor/lowrisc_dv_lib.vendor.hjson @@ -0,0 +1,13 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + name: "dv_lib", + target_dir: "lowrisc_ip/dv_lib", + + upstream: { + url: "https://github.com/lowRISC/opentitan" + rev: "master" + only_subdir: "hw/dv/sv/dv_lib" + } +} diff --git a/vendor/lowrisc_dv_utils.lock.hjson b/vendor/lowrisc_dv_utils.lock.hjson new file mode 100644 index 00000000..fb08a743 --- /dev/null +++ b/vendor/lowrisc_dv_utils.lock.hjson @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// This file is generated by the util/vendor script. Please do not modify it +// manually. + +{ + upstream: + { + url: https://github.com/lowRISC/opentitan + rev: 0d7f7ac755d4e00811257027dd814edb2afca050 + only_subdir: hw/dv/sv/dv_utils + } +} diff --git a/vendor/lowrisc_dv_utils.vendor.hjson b/vendor/lowrisc_dv_utils.vendor.hjson new file mode 100644 index 00000000..8fb2fc15 --- /dev/null +++ b/vendor/lowrisc_dv_utils.vendor.hjson @@ -0,0 +1,14 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + name: "dv_utils", + target_dir: "lowrisc_ip/dv_utils", + patch_dir: "patches/lowrisc_dv_utils", + + upstream: { + url: "https://github.com/lowRISC/opentitan" + rev: "master" + only_subdir: "hw/dv/sv/dv_utils" + } +} diff --git a/vendor/lowrisc_dvsim.lock.hjson b/vendor/lowrisc_dvsim.lock.hjson new file mode 100644 index 00000000..e1bd75c0 --- /dev/null +++ b/vendor/lowrisc_dvsim.lock.hjson @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// This file is generated by the util/vendor script. Please do not modify it +// manually. 
+ +{ + upstream: + { + url: https://github.com/lowRISC/opentitan + rev: 0d7f7ac755d4e00811257027dd814edb2afca050 + only_subdir: util/dvsim + } +} diff --git a/vendor/lowrisc_dvsim.vendor.hjson b/vendor/lowrisc_dvsim.vendor.hjson new file mode 100644 index 00000000..3d316460 --- /dev/null +++ b/vendor/lowrisc_dvsim.vendor.hjson @@ -0,0 +1,13 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + name: "dvsim", + target_dir: "lowrisc_ip/dvsim", + + upstream: { + url: "https://github.com/lowRISC/opentitan" + rev: "master" + only_subdir: "util/dvsim" + } +} diff --git a/vendor/lowrisc_ip/common_ifs/clk_if.sv b/vendor/lowrisc_ip/common_ifs/clk_if.sv new file mode 100644 index 00000000..49030ec4 --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/clk_if.sv @@ -0,0 +1,29 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +//----------------------------- DESCRIPTION ------------------------------------ +// +// Generic clock interface for clock events in various utilities +// +//------------------------------------------------------------------------------ + +interface clk_if(input logic clk); + + clocking cb @(posedge clk); + endclocking + + clocking cbn @(negedge clk); + endclocking + + // Wait for 'n' clocks based of postive clock edge + task automatic wait_clks(int num_clks); + repeat (num_clks) @cb; + endtask + + // Wait for 'n' clocks based of negative clock edge + task automatic wait_n_clks(int num_clks); + repeat (num_clks) @cbn; + endtask + +endinterface diff --git a/vendor/lowrisc_ip/common_ifs/clk_rst_if.sv b/vendor/lowrisc_ip/common_ifs/clk_rst_if.sv new file mode 100644 index 00000000..124815d9 --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/clk_rst_if.sv @@ -0,0 +1,224 @@ +// Copyright lowRISC contributors. 
+// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// +// Interface: clk_rst_if +// Generic clock and reset interface for clock events in various utilities +// It also generates o_clk and o_rst_n signals for driving clk and rst_n in the tb. The advantage is +// clk and rst_n can be completely controlled in course of the simulation. +// This interface provides methods to set freq/period, wait for clk/rst_n, apply rst_n among other +// things. See individual method descriptions below. +// inout clk +// inout rst_n + +interface clk_rst_if #( + parameter string IfName = "main" +) ( + inout clk, + inout rst_n +); + +`ifndef VERILATOR + // include macros and import pkgs + `include "dv_macros.svh" + `include "uvm_macros.svh" + import uvm_pkg::*; +`endif + + bit drive_clk; // enable clk generation + logic o_clk; // output clk + + bit drive_rst_n; // enable rst_n generation + logic o_rst_n; // output rst_n + + // clk params + bit clk_gate = 1'b0; // clk gate signal + int clk_period_ps = 20_000; // 50MHz default + int clk_freq_mhz = 50; // 50MHz default + int duty_cycle = 50; // 50% default + int max_jitter_ps = 1000; // 1ns default + bit recompute = 1'b1; // compute half periods when period/freq/duty are changed + int clk_hi_ps; // half period hi in ps + int clk_lo_ps; // half period lo in ps + int jitter_chance_pc = 0; // jitter chance in percentage on clock edge - disabled by default + + // use IfName as a part of msgs to indicate which clk_rst_vif instance + string msg_id = {"clk_rst_if::", IfName}; + + clocking cb @(posedge clk); + endclocking + + clocking cbn @(negedge clk); + endclocking + + // Wait for 'n' clocks based of postive clock edge + task automatic wait_clks(int num_clks); + repeat (num_clks) @cb; + endtask + + // Wait for 'n' clocks based of negative clock edge + task automatic wait_n_clks(int num_clks); + repeat (num_clks) @cbn; + endtask + + // wait for rst_n to assert and then deassert 
+ task automatic wait_for_reset(bit wait_negedge = 1'b1, bit wait_posedge = 1'b1); + if (wait_negedge && ($isunknown(rst_n) || rst_n === 1'b1)) @(negedge rst_n); + if (wait_posedge && (rst_n === 1'b0)) @(posedge rst_n); + endtask + + // set the clk frequency in mhz + function automatic void set_freq_mhz(int freq_mhz); + clk_freq_mhz = freq_mhz; + clk_period_ps = 1000_000 / clk_freq_mhz; + recompute = 1'b1; + endfunction + + // call this function at t=0 (from tb top) to enable clk and rst_n to be driven + function automatic void set_active(bit drive_clk_val = 1'b1, bit drive_rst_n_val = 1'b1); + time t = $time; + if (t == 0) begin + drive_clk = drive_clk_val; + drive_rst_n = drive_rst_n_val; + end + else begin +`ifdef VERILATOR + $error({msg_id, "this function can only be called at t=0"}); +`else + `uvm_fatal(msg_id, "this function can only be called at t=0") +`endif + end + endfunction + + // set the clk period in ns + function automatic void set_period_ns(int period_ps); + clk_period_ps = period_ps; + clk_freq_mhz = 1000_000 / clk_period_ps; + recompute = 1'b1; + endfunction + + // set the duty cycle (1-99) + function automatic void set_duty_cycle(int duty); + if (!(duty inside {[1:99]})) begin +`ifdef VERILATOR + $error({msg_id, $sformatf("duty cycle %0d is not inside [1:99]", duty)}); +`else + `uvm_fatal(msg_id, $sformatf("duty cycle %0d is not inside [1:99]", duty)) +`endif + end + duty_cycle = duty; + recompute = 1'b1; + endfunction + + // set maximum jitter in ps + function automatic void set_max_jitter_ps(int jitter_ps); + max_jitter_ps = jitter_ps; + endfunction + + // set jitter chance in percentage (0 - 100) + // 0 - dont add any jitter; 100 - add jitter on every clock edge + function automatic void set_jitter_chance_pc(int jitter_chance); + if (!(jitter_chance inside {[0:100]})) begin +`ifdef VERILATOR + $error({msg_id, $sformatf("jitter_chance %0d is not inside [0:100]", jitter_chance)}); +`else + `uvm_fatal(msg_id, $sformatf("jitter_chance %0d is not 
inside [0:100]", jitter_chance)) +`endif + end + jitter_chance_pc = jitter_chance; + endfunction + + // start / ungate the clk + task automatic start_clk(bit wait_for_posedge = 1'b0); + clk_gate = 1'b0; + if (wait_for_posedge) wait_clks(1); + endtask + + // stop / gate the clk + function automatic void stop_clk(); + clk_gate = 1'b1; + endfunction + + // add jitter to clk_hi and clk_lo half periods based on jitter_chance_pc + function automatic void add_jitter(); + int jitter_ps; + if ($urandom_range(1, 100) <= jitter_chance_pc) begin +`ifndef VERILATOR + `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(jitter_ps, + jitter_ps inside {[-1*max_jitter_ps:max_jitter_ps]};, "", msg_id) +`endif + clk_hi_ps += jitter_ps; + end + if ($urandom_range(1, 100) <= jitter_chance_pc) begin +`ifndef VERILATOR + `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(jitter_ps, + jitter_ps inside {[-1*max_jitter_ps:max_jitter_ps]};, "", msg_id) +`endif + clk_lo_ps += jitter_ps; + end + endfunction + + // apply reset with specified scheme + // TODO make this enum? 
+ // rst_n_scheme + // 0 - fullly synchronous reset - it is asserted and deasserted on clock edges + // 1 - async assert, sync dessert (default) + // 2 - async assert, async dessert + // 3 - clk gated when reset asserted + // Note: for power on reset, please ensure pre_reset_dly_clks is set to 0 + task automatic apply_reset(int pre_reset_dly_clks = 0, + int reset_width_clks = $urandom_range(4, 20), + int post_reset_dly_clks = 0, + int rst_n_scheme = 1); + int dly_ps; + dly_ps = $urandom_range(0, clk_period_ps); + wait_clks(pre_reset_dly_clks); + case (rst_n_scheme) + 0: begin : sync_assert_deassert + o_rst_n <= 1'b0; + wait_clks(reset_width_clks); + o_rst_n <= 1'b1; + end + 1: begin : async_assert_sync_deassert + #(dly_ps * 1ps); + o_rst_n <= 1'b0; + wait_clks(reset_width_clks); + o_rst_n <= 1'b1; + end + 2: begin : async_assert_async_deassert + #(dly_ps * 1ps); + o_rst_n <= 1'b0; + wait_clks(reset_width_clks); + dly_ps = $urandom_range(0, clk_period_ps); + #(dly_ps * 1ps); + o_rst_n <= 1'b1; + end + endcase + wait_clks(post_reset_dly_clks); + endtask + + // clk gen + initial begin + // start driving clk only after the first por reset assertion + wait_for_reset(.wait_posedge(1'b0)); + #1ps o_clk = 1'b0; + forever begin + if (recompute) begin + clk_hi_ps = clk_period_ps * duty_cycle / 100; + clk_lo_ps = clk_period_ps - clk_hi_ps; + recompute = 1'b0; + end + if (jitter_chance_pc != 0) add_jitter(); + #(clk_lo_ps * 1ps); + // wiggle output clk if not gated + if (!clk_gate) o_clk = 1'b1; + #(clk_hi_ps * 1ps); + o_clk = 1'b0; + end + end + + assign clk = drive_clk ? o_clk : 1'bz; + assign rst_n = drive_rst_n ? o_rst_n : 1'bz; + +endinterface diff --git a/vendor/lowrisc_ip/common_ifs/common_ifs.core b/vendor/lowrisc_ip/common_ifs/common_ifs.core new file mode 100644 index 00000000..31ca6932 --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/common_ifs.core @@ -0,0 +1,20 @@ +CAPI=2: +# Copyright lowRISC contributors. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:common_ifs" +description: "Common interfaces used in DV" + +filesets: + files_dv: + depend: + - lowrisc:dv:pins_if + files: + - clk_if.sv + - clk_rst_if.sv + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/common_ifs/index.md b/vendor/lowrisc_ip/common_ifs/index.md new file mode 100644 index 00000000..d54a46cb --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/index.md @@ -0,0 +1,81 @@ +# Common interfaces + + +## Overview +In this directory, we provide commonly used interfaces used to construct +testbenches for DV. These interfaces are instantiated inside `tb` module for +connecting dut signals. They are described in detail below. + +### `clk_if` +This is a passive clock interface that is used to wait for clock events in +testbenches. +This interface has two clocking blocks: `cb` and `cbn` for synchronizing to +positive and negative clock edges. This interface consists of following tasks: +* `wait_clks`: waits for specified number of positive clock edges +* `wait_n_clks`: waits for specified number of negative clock edges + +### `clk_rst_if` +This interface provides the ability to drive / sample clock and reset signal. +It provides various methods related to clock and reset generation. These +methods can be categorized into `setup methods` and `drive / sample` methods. 
+Following are `setup methods` of `clk_rst_if`: +* `set_freq_mhz`: set the clk frequency in mhz and calculate period in ns +* `set_duty_cycle`: set the duty cycle (1-99) +* `set_active`: enables `clk` and `rst_n` generation + typically, called at t=0 (from tb top) +* `set_period_ns`: set the clk period in ns and calculate frequency in mhz +* `set_jitter_chance_pc`: set jitter chance in percentage (0 - 100) + * 0: do not add any jitter + * 100: add jitter on every clock edge +* `set_max_jitter_ps`: set maximum jitter in ps +Following are `drive / sample` methods of `clk_rst_if`: +* `wait_for_reset`: wait for rst_n to assert and then deassert +* `apply_reset`: apply reset with specified scheme out of following: + * fully synchronous reset + * async assert, sync deassert + * async assert, async deassert + * clk gated when reset asserted +* `add_jitter`: add jitter to `clk_hi` and `clk_lo` half periods based on + `jitter_chance_pc` +* `start_clk`: start / ungate clock +* `stop_clk`: stop / gate the clk +* `wait_clks`: waits for specified number of positive clock edges +* `wait_n_clks`: waits for specified number of negative clock edges + +### `pins_if` +This parameterized interface provides the ability to drive / sample any signal +in the DUT. +```systemverilog +interface pins_if #( + parameter int Width = 1 +) ( + inout [Width-1:0] pins +); +``` +The member `pins` is inout type and it can be connected to any input or +output port of the dut to drive or sample them. `pins` can be driven either +internally using `pins_o` and `pins_oe` signals, that constitute a tri-state +buffer implementation. This provides the ability to disconnect `pins` by driving +them to a high impedance state. `pins` may also be driven through an external +driver that it gets connected to. This interface also provides capability +to drive weak pull-up or pull-down on `pins` in case of no internal or external +drivers. The members `pins_pu` and `pins_pd` control weak pull-up or pull-down +functionality.
The following diagram explains the working of `pins_if`: + +## `pins_if` block diagram +![Block diagram](pins_if.svg) + +Some of the commonly used methods of `pins_if` are as follows: +* `drive_en_pin`: Drive specified value `val` on specified index `idx` of + `pins_oe` signal +* `drive_en`: Drive `pins_oe` signal to specified value `val` +* `drive_pin`: Drive specified index `idx` of pins_oe signal to 1, and the same + index of `pins_o` to specified value `val` + value +* `drive`: Drive `pins_oe` to all 1's and specified value `val` on `pins_o` +* `sample_pin`: Sample and return value of `pins[idx]` for specified index `idx` +* `sample`: Sample and return value of `pins` +* `set_pullup_en`: Implement pull-up on specific bits of `pins` based on + specified value `val` +* `set_pulldown_en`: Implement pull-down on specific bits of `pins` based on + specified value `val` diff --git a/vendor/lowrisc_ip/common_ifs/pins_if.sv b/vendor/lowrisc_ip/common_ifs/pins_if.sv new file mode 100644 index 00000000..2a419ed7 --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/pins_if.sv @@ -0,0 +1,96 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// Interface: pins_if +// Description: Pin interface for driving and sampling individual pins such as interrupts, alerts +// and gpios.
+`ifndef SYNTHESIS + +interface pins_if #( + parameter int Width = 1 +) ( + inout [Width-1:0] pins +); + + + logic [Width-1:0] pins_o; // value to be driven out + wire [Width-1:0] pins_int; // value of pin using internal pull-up / pull-down + bit [Width-1:0] pins_oe = '0; // output enable + bit [Width-1:0] pins_pd = '0; // pull down enable + bit [Width-1:0] pins_pu = '0; // pull up enable + + // function to set pin output enable for specific pin (useful for single pin interface) + function automatic void drive_en_pin(int idx = 0, bit val); + pins_oe[idx] = val; + endfunction + + // function to set pin output enable for all pins + function automatic void drive_en(bit [Width-1:0] val); + pins_oe = val; + endfunction + + // function to drive a specific pin with a value (useful for single pin interface) + function automatic void drive_pin(int idx = 0, logic val); + pins_oe[idx] = 1'b1; + pins_o[idx] = val; + endfunction // drive_pin + + // function to drive all pins + function automatic void drive(logic [Width-1:0] val); + pins_oe = {Width{1'b1}}; + pins_o = val; + endfunction // drive + + // function to drive all pull down values + function automatic void set_pulldown_en(bit [Width-1:0] val); + pins_pd = val; + endfunction // set_pulldown_en + + // function to drive all pull up values + function automatic void set_pullup_en(bit [Width-1:0] val); + pins_pu = val; + endfunction // set_pullup_en + + // function to drive the pull down value on a specific pin + function automatic void set_pulldown_en_pin(int idx = 0, bit val); + pins_pd[idx] = val; + endfunction // set_pulldown_en_pin + + // function to drive the pull up value on a specific pin + function automatic void set_pullup_en_pin(int idx = 0, bit val); + pins_pu[idx] = val; + endfunction // set_pullup_en_pin + + // function to sample a specific pin (useful for single pin interface) + function automatic logic sample_pin(int idx = 0); + return pins[idx]; + endfunction + + // function to sample all pins + function 
automatic logic [Width-1:0] sample(); + return pins; + endfunction + + // make connections + generate + for (genvar i = 0; i < Width; i++) begin : each_pin + assign pins_int[i] = pins_pd[i] ? 1'b0 : + pins_pu[i] ? 1'b1 : 1'bz; + // If output enable is 1, strong driver assigns pin to 'value to be driven out'; + // the external strong driver can still affect pin, if exists. + // Else if output enable is 0, weak pullup or pulldown is applied to pin. + // By doing this, we make sure that weak pullup or pulldown does not override + // any 'x' value on pin, that may result due to conflicting values + // between 'value to be driven out' and the external driver's value. + assign pins[i] = pins_oe[i] ? pins_o[i] : 1'bz; +`ifdef VERILATOR + assign pins[i] = ~pins_oe[i] ? pins_int[i] : 1'bz; +`else + assign (pull0, pull1) pins[i] = ~pins_oe[i] ? pins_int[i] : 1'bz; +`endif + end + endgenerate + +endinterface +`endif diff --git a/vendor/lowrisc_ip/common_ifs/pins_if.svg b/vendor/lowrisc_ip/common_ifs/pins_if.svg new file mode 100644 index 00000000..8d9d5d00 --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/pins_if.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/vendor/lowrisc_ip/common_ifs/pins_ifs.core b/vendor/lowrisc_ip/common_ifs/pins_ifs.core new file mode 100644 index 00000000..f528e12d --- /dev/null +++ b/vendor/lowrisc_ip/common_ifs/pins_ifs.core @@ -0,0 +1,17 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:pins_if" +description: "Common interfaces used in DV" + +filesets: + files_dv: + files: + - pins_if.sv + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/csr_utils/README.md b/vendor/lowrisc_ip/csr_utils/README.md new file mode 100644 index 00000000..f2ea5ef6 --- /dev/null +++ b/vendor/lowrisc_ip/csr_utils/README.md @@ -0,0 +1,159 @@ +# CSR utilities + + +This csr_utils folder intends to implement CSR related methods and test sequences for DV +to share across all testbenches. + +### CSR utility package +`csr_utils_pkg` provides common methods and properties to support and manage CSR accesses +and CSR related test sequences. + +#### Global types and variables +All common types and variables are defined at this package level. Examples are: +```systemverilog + uint outstanding_accesses = 0; + uint default_timeout_ns = 1_000_000; +``` + +##### Outstanding_accesses +`csr_utils_pkg` used an internal variable to store the number of accesses +(read or write) that have not yet completed. This variable is shared among all methods of +register reading and writing. Directly accessing this variable is discouraged. Instead, +the following methods are used to control this variable to keep track of non-blocking +accesses made in the testbench: +```systemverilog + function automatic void increment_outstanding_access(); + outstanding_accesses++; + endfunction + + function automatic void decrement_outstanding_access(); + outstanding_accesses--; + endfunction + + task automatic wait_no_outstanding_access(); + wait(outstanding_accesses == 0); + endtask + + function automatic void clear_outstanding_access(); + outstanding_accesses = 0; + endfunction +``` + +##### CSR spinwait +One of the commonly used tasks in `csr_utils_pkg` is `csr_spinwait`. This task +can poll a CSR or CSR field continuously or periodically until it reads out the +expected value. 
This task also has a timeout check in case, due to a DUT or testbench +issue, the CSR or CSR field never returns the expected value. +Example below uses the `csr_spinwait` to wait until the CSR `fifo_status` field +`fifo_full` reads back as 0: +```systemverilog +csr_spinwait(.ptr(ral.status.fifo_full), .exp_data(1'b0)); +``` + +##### Read and check all CSRs +The purpose of the `read_and_check_all_csrs` task is to read all valid CSRs from +the given `uvm_reg_block` and check against their expected values from RAL. This +task is primarily implemented to be used after reset, to make sure all the CSRs are +being reset to the default value. + +##### Under_reset +Because `csr_utils_pkg` is not connected to any interface, methods inside +this package are not able to get reset information. Currently the `under_reset` +bit is controlled with two functions: +```systemverilog +function automatic void reset_asserted(); + under_reset = 1; +endfunction + +function automatic void reset_deasserted(); + under_reset = 0; +endfunction +``` +This reset information is updated in `dv_lib/dv_base_vseq.sv`. When the +`apply_reset` task is triggered, it will set and reset the `under_reset` bit +via the functions above. + +#### Global CSR util methods +##### Global methods for CSR and MEM attributes +This package provides methods to access CSR or Memory attributes, such as address, +value, etc. Examples are: + * `get_csr_addrs` + * `get_mem_addr_ranges` + * `decode_csr_or_field` + +##### Global methods for CSR access +The CSR access methods are based on `uvm_reg` methods, such as `uvm_reg::read()`, +`uvm_reg::write()`, `uvm_reg::update()`. For all CSR methods, the user can
Examples are: + * `csr_rd_check`: Given the uvm_reg or uvm_reg_field object, this method will + compare the CSR value with the expected value (given as an input) or with + the RAL mirrored value + * `csr_update`: Given the uvm_reg object, this method will update the value of the + register in DUT to match the desired value + +To enhance the usability, these methods support CSR blocking, non-blocking +read/write, and a timeout checking. + * A blocking thread will not execute the next sequence until the current CSR + access is finished + * A non-blocking thread allows multiple CSR accesses to be issued back-to-back + without waiting for the response + * A timeout check will discard the ongoing CSR access by disabling the forked + thread and will throw a UVM_ERROR once the process exceeds the max timeout setting + +### CSR sequence library +`csr_seq_lib.sv` provides common CSR related test sequences to share across all testbenches. +These test sequences are based off the standard sequences provided in UVM1.2 RAL. +The parent class (DUT-specific test or sequence class) that creates them needs to provide them +with the DUT RAL model. The list of CSRs are then extracted from the RAL model to performs the checks. +In addition, the test sequences provide an ability to exclude a CSR from writes or reads (or both) +depending on the behavior of the CSR in the design. This is explained more in the +[CSR exclusion methodology](#csr-exclusion-methodology) section below. +All CSR accesses in these sequences are made non-blocking to ensure back-to-back scenarios +are exercised. +Supported CSR test sequences are: + * `csr_hw_reset`: Write all CSRs with random values and then reset the DUT. 
+ After reset, read all CSRs and compare with expected values + * `csr_rw`: Write a randomly selected CSR, then read out the updated + CSR or CSR field and compare with expected value + * `csr_bit_bash`: Randomly select a CSR and write 1's and 0's to + every bit, then read the CSR to compare with expected value + * `csr_aliasing`: Randomly write a CSR, then read all CSRs to + verify that only the CSR that was written was updated + * `mem_walk`: Write and read all valid addresses in the memory. Compare + the read results with the expected values + +### CSR exclusion methodology +The CSR test sequences listed above intend to perform a sanity check of CSR +read/write accesses, but do not intend to check specific DUT functionalities. Thus the +sequences might need to exclude reading or writing certain CSRs depending on the +specific testbench. +`csr_excl_item` is a class that supports adding exclusions to CSR test sequences. +Examples of useful functions in this class are: +* `add_excl`: Add exclusions to the CSR test sequences. This function has two inputs: + - Exclusion scope: A hierarchical path name at all levels including block, + CSR, and field. This input supports * and ?
wildcards for glob style matching + - CSR_exclude type: An enumeration defined as below: + ```systemverilog + typedef enum bit[2:0] { + CsrNoExcl = 3'b000, // no exclusions + CsrExclInitCheck = 3'b001, // exclude csr from init val check + CsrExclWriteCheck = 3'b010, // exclude csr from write-read check + CsrExclCheck = 3'b011, // exclude csr from init or write-read check + CsrExclWrite = 3'b100, // exclude csr from write + CsrExclAll = 3'b111 // exclude csr from init or write or write-read check + } csr_excl_type_e; + ``` + + One example to use this function in HMAC to exclude all CSRs or fields with + names starting with "key": + ```systemverilog + csr_excl.add_excl({scope, ".", "key?"}, CsrExclWrite); + ``` + +* `has_excl`: Check if the CSR has a match in the existing exclusions lookup, + and is not intended to be used externally + +### CSR sequence framework +The [cip_lib]({{< relref "hw/dv/sv/cip_lib/doc" >}}) includes a virtual sequence named `cip_base_vseq`, +that provides a common framework for all testbenches to run these CSR test sequences and +add exclusions. diff --git a/vendor/lowrisc_ip/csr_utils/csr_excl_item.sv b/vendor/lowrisc_ip/csr_utils/csr_excl_item.sv new file mode 100644 index 00000000..9dd88b1e --- /dev/null +++ b/vendor/lowrisc_ip/csr_utils/csr_excl_item.sv @@ -0,0 +1,111 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// Class: csr_excl_item +// Description: CSR exclusion item that holds exclusions applied for a given set of blocks / +// registers / fields provided and maintained as strings.
+class csr_excl_item extends uvm_object; + `uvm_object_utils(csr_excl_item) + + typedef struct { + int csr_test_type; + csr_excl_type_e csr_excl_type; + } csr_excl_s; + local csr_excl_s exclusions[string]; + + `uvm_object_new + + // add exclusion for an individual block, csr or field + // arg obj: this is the hierarchical path name to the block, csr or field - passing * and ? + // wildcards for glob style matching is allowed. User needs to take care that wildcards does not + // end up inadvertently matching more that what was desired. Examples: + // To exclude ral.ctrl.tx field from writes, obj can be "ral.ctrl.tx" or "*.ctrl.tx"; passing + // "*.tx" might be too generic + virtual function void add_excl(string obj, + csr_excl_type_e csr_excl_type, + csr_test_type_e csr_test_type = CsrAllTests); + bit [2:0] val = CsrNoExcl; + bit [NUM_CSR_TESTS-1:0] test = CsrInvalidTest; + csr_excl_s csr_excl_item; + if (csr_test_type == CsrInvalidTest) begin + `uvm_fatal(`gfn, $sformatf("add %s exclusion without a test", obj)) + end + val = csr_excl_type | exclusions[obj].csr_excl_type; + test = csr_test_type | exclusions[obj].csr_test_type; + exclusions[obj].csr_excl_type = csr_excl_type_e'(val); + exclusions[obj].csr_test_type = test; + endfunction + + // function to check if given blk / csr or field AND its parent has been excluded with the + // supplied exclusion type + // arg uvm_object obj: given blk, csr or field + // arg csr_excl_type_e csr_excl_type: exclusion type + function bit is_excl(uvm_object obj, + csr_excl_type_e csr_excl_type, + csr_test_type_e csr_test_type); + uvm_reg_block blk; + uvm_reg csr; + + // if supplied obj is a uvm_reg_block or uvm_reg, then its parent is a uvm_reg_block + // check if obj's parent is excluded + if ($cast(blk, obj)) begin + if (blk.get_parent() != null) begin + blk = blk.get_parent(); + if (has_excl(blk.`gfn, csr_excl_type, csr_test_type)) return 1'b1; + end + end + if ($cast(csr, obj)) begin + blk = csr.get_parent(); + if 
(has_excl(blk.`gfn, csr_excl_type, csr_test_type)) return 1'b1; + end + // TODO: check if any parent in the hierarchy above is excluded + // check if obj is excluded + return (has_excl(obj.`gfn, csr_excl_type, csr_test_type)); + endfunction + + // check if applied string obj has a match in existing exclusions lookup in defined csr_test_type + // function is to not be called externally + local function bit has_excl(string obj, + csr_excl_type_e csr_excl_type, + csr_test_type_e csr_test_type); + // check if obj exists verbatim + if (exclusions.exists(obj)) begin + `uvm_info(`gfn, $sformatf("has_excl: found exact excl match for %0s: %0s", + obj, exclusions[obj].csr_excl_type.name()), UVM_DEBUG) + // check if bit(s) corresponding to csr_excl_type are set in defined csr_test_type + if ((exclusions[obj].csr_test_type & csr_test_type) != CsrInvalidTest) begin + if ((exclusions[obj].csr_excl_type & csr_excl_type) != CsrNoExcl) return 1'b1; + end + end + else begin + // attempt glob style matching + foreach (exclusions[str]) begin + if (!uvm_re_match(str, obj)) begin + `uvm_info(`gfn, $sformatf("has_excl: found glob excl match for %0s(%0s): %0s", + obj, str, exclusions[str].csr_excl_type.name()), UVM_DEBUG) + // check if bit(s) corresponding to csr_excl_type are set in defined csr_test_type + if ((exclusions[str].csr_test_type & csr_test_type) != CsrInvalidTest) begin + if ((exclusions[str].csr_excl_type & csr_excl_type) != CsrNoExcl) return 1'b1; + end + end + end + end + return 1'b0; + endfunction + + // print all exclusions for ease of debug (call this ideally after adding all exclusions) + virtual function void print_exclusions(uvm_verbosity verbosity = UVM_HIGH); + string test_names; + for (int i = 0; i < NUM_CSR_TESTS; i++) begin + csr_test_type_e csr_test = csr_test_type_e'(1 << i); + test_names = {test_names, csr_test.name(), " "}; + end + foreach (exclusions[item]) begin + `uvm_info(`gfn, $sformatf("CSR/field [%0s] excluded with %0s in csr_tests: {%s}={%0b}", + 
item, exclusions[item].csr_excl_type.name(), test_names, + exclusions[item].csr_test_type), verbosity) + end + endfunction + +endclass diff --git a/vendor/lowrisc_ip/csr_utils/csr_seq_lib.sv b/vendor/lowrisc_ip/csr_utils/csr_seq_lib.sv new file mode 100644 index 00000000..47005c57 --- /dev/null +++ b/vendor/lowrisc_ip/csr_utils/csr_seq_lib.sv @@ -0,0 +1,477 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// CSR suite of sequences that do writes and reads to csrs +// includes hw_reset, rw, bit_bash and aliasing tests for csrs, and mem_walk for uvm_mems +// TODO: when mem backdoor is implemented, add uvm_mem_access_seq for backdoor rd +// The sequences perform csr writes and reads and follow the standard csr test suite. If external +// checker is enabled, then the external entity is required to update the mirrored value on +// writes. If not enabled, the sequences themselves call predict function to update the mirrored +// value. Consequently, the read values are checked against the mirrored value and not the +// previously written value. This approach is better since it takes care of special +// register and field access policies. Also, we use csr_rd_check task instead of csr_mirror to take +// field exclusions into account. +// +// Csrs to be tested is accumulated and shuffled from the supplied reg models. +// What / how many csrs to test can be further controlled in 3 ways - +// 1. Externally add specific csrs to test_csrs queue (highest prio) +// 2. Set num_test_csrs test a randomly picked set of csrs from the supplied models +// 3. Set / pass via plusarg, num_csr_chunks / test_csr_chunk +// +// Exclusions are to be provided using the csr_excl_item item (see class for more details). 
+class csr_base_seq extends uvm_reg_sequence #(uvm_sequence #(uvm_reg_item)); + `uvm_object_utils(csr_base_seq) + + uvm_reg_block models[$]; + uvm_reg all_csrs[$]; + uvm_reg test_csrs[$]; + csr_excl_item m_csr_excl_item; + + // By default, assume external checker (example, scoreboard) is turned off. If that is the case, + // then writes are followed by call to predict function to update the mirrored value. Reads are + // then checked against the mirrored value using csr_rd_check task. If external checker is + // enabled, then we let the external checker do the predict and compare. + // In either case, we should be able to do completely non-blocking writes and reads. + bit external_checker = 1'b0; + + // either use num_test_csrs or {test_csr_chunk, num_csr_chunks} to test slice of all csrs + int num_test_csrs = 0; + int test_csr_chunk = 1; + int num_csr_chunks = 1; + + `uvm_object_new + + // pre_start + virtual task pre_start(); + super.pre_start(); + + // create test_csrs list only if its empty + if (test_csrs.size() == 0) set_csr_test_range(); + + // create dummy m_csr_excl_item if not supplied + if (m_csr_excl_item == null) begin + `uvm_info(`gtn, "m_csr_excl_item is null, creating a dummy one locally", UVM_LOW) + m_csr_excl_item = csr_excl_item::type_id::create("m_csr_excl_item"); + end + endtask + + // post_start + virtual task post_start(); + super.post_start(); + wait_no_outstanding_access(); + test_csrs.delete(); + endtask + + function void set_csr_excl_item(csr_excl_item item); + this.m_csr_excl_item = item; + endfunction + + // extract csrs and split and prune to a specified test_csr_chunk + virtual function void set_csr_test_range(); + int start_idx; + int end_idx; + int chunk_size; + + // extract all csrs from the model + // TODO: add and use function here instead that allows pre filtering csrs + all_csrs.delete(); + foreach (models[i]) begin + models[i].get_registers(all_csrs); + end + + if (num_test_csrs != 0) begin + num_csr_chunks = all_csrs.size / 
num_test_csrs + 1; + `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(test_csr_chunk, + test_csr_chunk inside {[1:num_csr_chunks]};) + end + else begin + // extract test_csr_chunk, num_csr_chunks from plusargs + void'($value$plusargs("test_csr_chunk=%0d", test_csr_chunk)); + void'($value$plusargs("num_csr_chunks=%0d", num_csr_chunks)); + end + + if (!(test_csr_chunk inside {[1:num_csr_chunks]})) begin + `uvm_fatal(`gtn, $sformatf({{"invalid opt +test_csr_chunk=%0d, +num_csr_chunks=%0d "}, + {"(1 <= test_csr_chunk <= num_csr_chunks)"}}, + test_csr_chunk, num_csr_chunks)) + end + chunk_size = (num_test_csrs != 0) ? num_test_csrs : (all_csrs.size / num_csr_chunks + 1); + start_idx = (test_csr_chunk - 1) * chunk_size; + end_idx = test_csr_chunk * chunk_size; + if (end_idx >= all_csrs.size()) + end_idx = all_csrs.size() - 1; + + test_csrs = all_csrs[start_idx:end_idx]; + `uvm_info(`gtn, $sformatf("testing %0d csrs [%0d - %0d] in all supplied models", + test_csrs.size(), start_idx, end_idx), UVM_MEDIUM) + foreach (test_csrs[i]) begin + `uvm_info(`gtn, $sformatf("test_csrs list: %0s, reset: 0x%0x", test_csrs[i].get_full_name(), + test_csrs[i].get_mirrored_value()), UVM_HIGH) + end + test_csrs.shuffle(); + endfunction + +endclass + +//-------------------------------------------------------------------------------------------------- +// Class: csr_hw_reset_seq +// Brief Description: This sequence reads all CSRs and checks it against the reset value provided +// in the RAL specification. Note that this does not sufficiently qualify as the CSR HW reset test. +// The 'full' CSR HW reset test is constructed externally by running the csr_write_seq below first, +// issuing reset and only then running this sequence. 
+//-------------------------------------------------------------------------------------------------- +class csr_hw_reset_seq extends csr_base_seq; + `uvm_object_utils(csr_hw_reset_seq) + + `uvm_object_new + + virtual task body(); + foreach (test_csrs[i]) begin + uvm_reg_data_t compare_mask; + + // check if parent block or register is excluded from init check + if (m_csr_excl_item.is_excl(test_csrs[i], CsrExclInitCheck, CsrHwResetTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclInitCheck exclusion", + test_csrs[i].get_full_name()), UVM_MEDIUM) + continue; + end + + `uvm_info(`gtn, $sformatf("Verifying reset value of register %0s", + test_csrs[i].get_full_name()), UVM_MEDIUM) + + compare_mask = get_mask_excl_fields(test_csrs[i], CsrExclInitCheck, CsrHwResetTest, + m_csr_excl_item); + csr_rd_check(.ptr (test_csrs[i]), + .blocking (0), + .compare (!external_checker), + .compare_vs_ral(1'b1), + .compare_mask (compare_mask)); + end + endtask + +endclass + +//-------------------------------------------------------------------------------------------------- +// Class: csr_write_seq +// Brief Description: This sequence writes a random value to all CSRs. It does not perform any +// checks. It is run as the first step of the CSR HW reset test. 
+//-------------------------------------------------------------------------------------------------- +class csr_write_seq extends csr_base_seq; + `uvm_object_utils(csr_write_seq) + + `uvm_object_new + + virtual task body(); + uvm_reg_data_t wdata; + + foreach (test_csrs[i]) begin + // check if parent block or register is excluded from write + if (m_csr_excl_item.is_excl(test_csrs[i], CsrExclWrite, CsrHwResetTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclWrite exclusion", + test_csrs[i].get_full_name()), UVM_MEDIUM) + continue; + end + + `uvm_info(`gtn, $sformatf("Writing random data to register %0s", + test_csrs[i].get_full_name()), UVM_MEDIUM) + + `DV_CHECK_STD_RANDOMIZE_FATAL(wdata) + wdata &= get_mask_excl_fields(test_csrs[i], CsrExclWrite, CsrHwResetTest, m_csr_excl_item); + csr_wr(.csr(test_csrs[i]), .value(wdata), .blocking(0)); + end + endtask + +endclass + + +//-------------------------------------------------------------------------------------------------- +// Class: csr_rw_seq +// Brief Description: This seq writes a random value to a CSR and reads it back. The read value +// is checked for correctness while adhering to its access policies. A random choice is made between +// reading back the CSR as a whole or reading fields individually, so that partial accesses are made +// into the DUT as well. 
+//-------------------------------------------------------------------------------------------------- +class csr_rw_seq extends csr_base_seq; + `uvm_object_utils(csr_rw_seq) + + `uvm_object_new + + rand bit do_csr_rd_check; + rand bit do_csr_field_rd_check; + + constraint csr_or_field_rd_check_c { + // at least one of them should be set + do_csr_rd_check || do_csr_field_rd_check; + } + + virtual task body(); + foreach (test_csrs[i]) begin + uvm_reg_data_t wdata; + uvm_reg_data_t compare_mask; + uvm_reg_field test_fields[$]; + + // check if parent block or register is excluded from write + if (m_csr_excl_item.is_excl(test_csrs[i], CsrExclWrite, CsrRwTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclWrite exclusion", + test_csrs[i].get_full_name()), UVM_MEDIUM) + continue; + end + + `uvm_info(`gtn, $sformatf("Verifying register read/write for %0s", + test_csrs[i].get_full_name()), UVM_MEDIUM) + + `DV_CHECK_FATAL(randomize(do_csr_rd_check, do_csr_field_rd_check)) + `DV_CHECK_STD_RANDOMIZE_FATAL(wdata) + wdata &= get_mask_excl_fields(test_csrs[i], CsrExclWrite, CsrRwTest, m_csr_excl_item); + + // if external checker is not enabled and writes are made non-blocking, then we need to + // pre-predict so that the mirrored value will be updated. if we dont, then csr_rd_check task + // might pick up stale mirrored value + // the pre-predict also needs to happen after the register is being written, to make sure the + // register is getting the updated access information. 
+ csr_wr(.csr(test_csrs[i]), .value(wdata), .blocking(0), .predict(!external_checker)); + + // check if parent block or register is excluded from read-check + if (m_csr_excl_item.is_excl(test_csrs[i], CsrExclWriteCheck, CsrRwTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclWriteCheck exclusion", + test_csrs[i].get_full_name()), UVM_MEDIUM) + continue; + end + + compare_mask = get_mask_excl_fields(test_csrs[i], CsrExclWriteCheck, CsrRwTest, + m_csr_excl_item); + if (do_csr_rd_check) begin + csr_rd_check(.ptr (test_csrs[i]), + .blocking (0), + .compare (!external_checker), + .compare_vs_ral(1'b1), + .compare_mask (compare_mask)); + end + if (do_csr_field_rd_check) begin + test_csrs[i].get_fields(test_fields); + test_fields.shuffle(); + foreach (test_fields[j]) begin + bit compare = !m_csr_excl_item.is_excl(test_fields[j], CsrExclWriteCheck, CsrRwTest); + csr_rd_check(.ptr (test_fields[j]), + .blocking (0), + .compare (!external_checker && compare), + .compare_vs_ral(1'b1)); + end + end + end + endtask + +endclass + +//-------------------------------------------------------------------------------------------------- +// Class: csr_bit_bash_seq +// Brief Description: This sequence walks a 1 through each CSR by writing one bit at a time and +// reading the CSR back. The read value is checked for correctness while adhering to its access +// policies. This verifies that there is no aliasing within the fields / bits of a CSR. 
+//-------------------------------------------------------------------------------------------------- +class csr_bit_bash_seq extends csr_base_seq; + `uvm_object_utils(csr_bit_bash_seq) + + `uvm_object_new + + virtual task body(); + foreach (test_csrs[i]) begin + // check if parent block or register is excluded from write + if (m_csr_excl_item.is_excl(test_csrs[i], CsrExclWrite, CsrBitBashTest) || + m_csr_excl_item.is_excl(test_csrs[i], CsrExclWriteCheck, CsrBitBashTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclWrite/WriteCheck exclusion", + test_csrs[i].get_full_name()), UVM_MEDIUM) + continue; + end + + `uvm_info(`gtn, $sformatf("Verifying register bit bash for %0s", + test_csrs[i].get_full_name()), UVM_MEDIUM) + + begin + uvm_reg_field fields[$]; + string mode[`UVM_REG_DATA_WIDTH]; + uvm_reg_data_t dc_mask; // dont write or read + uvm_reg_data_t cmp_mask; // read but dont compare + int n_bits; + string field_access; + int next_lsb; + + n_bits = test_csrs[i].get_n_bytes() * 8; + + // Let's see what kind of bits we have... 
+ test_csrs[i].get_fields(fields); + + next_lsb = 0; + dc_mask = 0; + cmp_mask = 0; + + foreach (fields[j]) begin + int lsb, w, dc, cmp; + + field_access = fields[j].get_access(test_csrs[i].get_default_map()); + cmp = (fields[j].get_compare() == UVM_NO_CHECK); + lsb = fields[j].get_lsb_pos(); + w = fields[j].get_n_bits(); + + // Exclude write-only fields from compare because you are not supposed to read them + case (field_access) + "WO", "WOC", "WOS", "WO1", "NOACCESS", "": cmp = 1; + endcase + + // skip fields that are wr-excluded + if (m_csr_excl_item.is_excl(fields[j], CsrExclWrite, CsrBitBashTest)) begin + `uvm_info(`gtn, $sformatf("Skipping field %0s due to CsrExclWrite exclusion", + fields[j].get_full_name()), UVM_MEDIUM) + dc = 1; + end + + // ignore fields that are init or rd-excluded + cmp = m_csr_excl_item.is_excl(fields[j], CsrExclInitCheck, CsrBitBashTest) || + m_csr_excl_item.is_excl(fields[j], CsrExclWriteCheck, CsrBitBashTest) ; + + // Any unused bits on the right side of the LSB? + while (next_lsb < lsb) mode[next_lsb++] = "RO"; + + repeat (w) begin + mode[next_lsb] = field_access; + dc_mask[next_lsb] = dc; + cmp_mask[next_lsb] = cmp; + next_lsb++; + end + end + + // Any unused bits on the left side of the MSB? 
+ while (next_lsb < `UVM_REG_DATA_WIDTH) + mode[next_lsb++] = "RO"; + + // Bash the kth bit + for (int k = 0; k < n_bits; k++) begin + // Cannot test unpredictable bit behavior + if (dc_mask[k]) continue; + bash_kth_bit(test_csrs[i], k, mode[k], cmp_mask); + end + end + end + + endtask + + task bash_kth_bit(uvm_reg rg, + int k, + string mode, + uvm_reg_data_t mask); + + uvm_reg_data_t val; + string err_msg; + + `uvm_info(`gtn, $sformatf("bashing %0s bit #%0d", mode, k), UVM_HIGH) + repeat (2) begin + val = rg.get(); + val[k] = ~val[k]; + err_msg = $sformatf("Wrote %0s[%0d]: %0b", rg.get_full_name(), k, val[k]); + csr_wr(.csr(rg), .value(val), .blocking(1)); + + // if external checker is not enabled and writes are made non-blocking, then we need to + // pre-predict so that the mirrored value will be updated. if we dont, then csr_rd_check task + // might pick up stale mirrored value + if (!external_checker) begin + void'(rg.predict(.value(val), .kind(UVM_PREDICT_WRITE))); + end + + // TODO, outstanding access to same reg isn't supported in uvm_reg. Need to add another seq + // uvm_reg waits until transaction is completed, before start another read/write in same reg + csr_rd_check(.ptr (rg), + .blocking (0), + .compare (!external_checker), + .compare_vs_ral(1'b1), + .compare_mask (~mask), + .err_msg (err_msg)); + end + endtask: bash_kth_bit + +endclass + +//-------------------------------------------------------------------------------------------------- +// Class: csr_aliasing_seq +// Brief Description: For each CSR, this sequence writes a random value to it and reads ALL CSRs +// back. The read value of the CSR that was written is checked for correctness while adhering to its +// access policies. The read value of all other CSRs are compared against their previous values. +// This verifies that there is no aliasing across the address bits within the valid CSR space. 
+//-------------------------------------------------------------------------------------------------- +class csr_aliasing_seq extends csr_base_seq; + `uvm_object_utils(csr_aliasing_seq) + + `uvm_object_new + + virtual task body(); + foreach(test_csrs[i]) begin + uvm_reg_data_t wdata; + + // check if parent block or register is excluded + if (m_csr_excl_item.is_excl(test_csrs[i], CsrExclWrite, CsrAliasingTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclWrite exclusion", + test_csrs[i].get_full_name()), UVM_MEDIUM) + continue; + end + + `uvm_info(`gtn, $sformatf("Verifying register aliasing for %0s", + test_csrs[i].get_full_name()), UVM_MEDIUM) + + `DV_CHECK_STD_RANDOMIZE_FATAL(wdata) + wdata &= get_mask_excl_fields(test_csrs[i], CsrExclWrite, CsrAliasingTest, m_csr_excl_item); + csr_wr(.csr(test_csrs[i]), .value(wdata), .blocking(0)); + + // if external checker is not enabled and writes are made non-blocking, then we need to + // pre-predict so that the mirrored value will be updated. 
if we dont, then csr_rd_check task + // might pick up stale mirrored value + if (!external_checker) begin + void'(test_csrs[i].predict(.value(wdata), .kind(UVM_PREDICT_WRITE))); + end + + all_csrs.shuffle(); + foreach (all_csrs[j]) begin + uvm_reg_data_t compare_mask; + + // check if parent block or register is excluded + if (m_csr_excl_item.is_excl(all_csrs[j], CsrExclInitCheck, CsrAliasingTest) || + m_csr_excl_item.is_excl(all_csrs[j], CsrExclWriteCheck, CsrAliasingTest)) begin + `uvm_info(`gtn, $sformatf("Skipping register %0s due to CsrExclInit/WriteCheck exclusion", + all_csrs[j].get_full_name()), UVM_MEDIUM) + continue; + end + + compare_mask = get_mask_excl_fields(all_csrs[j], CsrExclWriteCheck, CsrAliasingTest, + m_csr_excl_item); + csr_rd_check(.ptr (all_csrs[j]), + .blocking (0), + .compare (!external_checker), + .compare_vs_ral(1'b1), + .compare_mask (compare_mask)); + end + wait_no_outstanding_access(); + end + endtask + +endclass + +//-------------------------------------------------------------------------------------------------- +// Class: csr_mem_walk_seq +// Brief Description: This seq walks through each address of the memory by running the default +// UVM mem walk sequence. +//-------------------------------------------------------------------------------------------------- +class csr_mem_walk_seq extends csr_base_seq; + uvm_mem_walk_seq mem_walk_seq; + + `uvm_object_utils(csr_mem_walk_seq) + + `uvm_object_new + + virtual task body(); + mem_walk_seq = uvm_mem_walk_seq::type_id::create("mem_walk_seq"); + foreach (models[i]) begin + mem_walk_seq.model = models[i]; + mem_walk_seq.start(null); + end + endtask : body + +endclass diff --git a/vendor/lowrisc_ip/csr_utils/csr_utils.core b/vendor/lowrisc_ip/csr_utils/csr_utils.core new file mode 100644 index 00000000..adec6549 --- /dev/null +++ b/vendor/lowrisc_ip/csr_utils/csr_utils.core @@ -0,0 +1,21 @@ +CAPI=2: +# Copyright lowRISC contributors. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:csr_utils" +description: "CSR utilities" + +filesets: + files_dv: + depend: + - lowrisc:dv:dv_utils + files: + - csr_utils_pkg.sv + - csr_excl_item.sv: {is_include_file: true} + - csr_seq_lib.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/csr_utils/csr_utils_pkg.sv b/vendor/lowrisc_ip/csr_utils/csr_utils_pkg.sv new file mode 100644 index 00000000..9fbfde42 --- /dev/null +++ b/vendor/lowrisc_ip/csr_utils/csr_utils_pkg.sv @@ -0,0 +1,571 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +package csr_utils_pkg; + // dep packages + import uvm_pkg::*; + import dv_utils_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // local types and variables + uint outstanding_accesses = 0; + uint default_timeout_ns = 1_000_000; // 1ms + uint default_spinwait_timeout_ns = 10_000_000; // 10ms + string msg_id = "csr_utils"; + bit default_csr_blocking = 1; + bit under_reset = 0; + + // global paramters for number of csr tests (including memory test) + parameter uint NUM_CSR_TESTS = 4; + + // csr field struct - hold field specific params + typedef struct { + uvm_reg csr; + uvm_reg_field field; + uvm_reg_data_t mask; + uint shift; + } csr_field_s; + + // csr test types + typedef enum bit [NUM_CSR_TESTS-1:0] { + CsrInvalidTest = 4'h0, + // elementary test types + CsrHwResetTest = 4'h1, + CsrRwTest = 4'h2, + CsrBitBashTest = 4'h4, + CsrAliasingTest = 4'h8, + // combinational test types (combinations of the above), used for exclusion tagging + CsrNonInitTests = 4'he, // all but HwReset test + CsrAllTests = 4'hf // all tests + } csr_test_type_e; + + // csr exclusion indications + typedef enum bit [2:0] { + CsrNoExcl = 
3'b000, // no exclusions + CsrExclInitCheck = 3'b001, // exclude csr from init val check + CsrExclWriteCheck = 3'b010, // exclude csr from write-read check + CsrExclCheck = 3'b011, // exclude csr from init or write-read check + CsrExclWrite = 3'b100, // exclude csr from write + CsrExclAll = 3'b111 // exclude csr from init or write or writ-read check + } csr_excl_type_e; + + function automatic void increment_outstanding_access(); + outstanding_accesses++; + endfunction + + function automatic void decrement_outstanding_access(); + outstanding_accesses--; + endfunction + + task automatic wait_no_outstanding_access(); + wait(outstanding_accesses == 0); + endtask + + function automatic void clear_outstanding_access(); + outstanding_accesses = 0; + endfunction + + function automatic void reset_asserted(); + under_reset = 1; + endfunction + + function automatic void reset_deasserted(); + under_reset = 0; + endfunction + + // Get all valid csr addrs - useful to check if incoming addr falls in the csr range. + function automatic void get_csr_addrs(input uvm_reg_block ral, ref uvm_reg_addr_t csr_addrs[$]); + uvm_reg csrs[$]; + ral.get_registers(csrs); + csr_addrs.delete(); + foreach (csrs[i]) begin + csr_addrs.push_back(csrs[i].get_address()); + end + endfunction + + // Get all valid mem addr ranges - useful to check if incoming addr falls in the mem range. 
+ function automatic void get_mem_addr_ranges(uvm_reg_block ral, ref addr_range_t mem_ranges[$]); + uvm_mem mems[$]; + ral.get_memories(mems); + mems.delete(); + foreach (mems[i]) begin + addr_range_t mem_range; + mem_range.start_addr = mems[i].get_address(); + mem_range.end_addr = mem_range.start_addr + + mems[i].get_size() * mems[i].get_n_bytes() - 1; + mem_ranges.push_back(mem_range); + end + endfunction + + // This fucntion return mirrored value of reg/field of given RAL + function automatic uvm_reg_data_t get_reg_fld_mirror_value(uvm_reg_block ral, + string reg_name, + string field_name = ""); + uvm_reg csr; + uvm_reg_field fld; + uvm_reg_data_t result; + string msg_id = {csr_utils_pkg::msg_id, "::get_reg_fld_mirror_value"}; + csr = ral.get_reg_by_name(reg_name); + `DV_CHECK_NE_FATAL(csr, null, "", msg_id) + // return field mirror value if field_name is passed, else return reg mirror value + if (field_name != "") begin + fld = csr.get_field_by_name(field_name); + `DV_CHECK_NE_FATAL(fld, null, "", msg_id) + result = fld.get_mirrored_value(); + end + else begin + result = csr.get_mirrored_value(); + end + return result; + endfunction : get_reg_fld_mirror_value + + // This function attempts to cast a given uvm_object ptr into uvm_reg or uvm_reg_field. If cast + // is successful on either, then set the appropriate csr_field_s return values. 
+ function automatic csr_field_s decode_csr_or_field(input uvm_object ptr); + uvm_reg csr; + uvm_reg_field fld; + csr_field_s result; + string msg_id = {csr_utils_pkg::msg_id, "::decode_csr_or_field"}; + + if ($cast(csr, ptr)) begin + // return csr object with null field; set the mask to all 1s and shift to 0 + result.csr = csr; + result.mask = '1; + result.shift = 0; + end + else if ($cast(fld, ptr)) begin + // return csr field object; return the appropriate mask and shift values + result.csr = fld.get_parent(); + result.field = fld; + result.mask = (1 << fld.get_n_bits()) - 1; + result.shift = fld.get_lsb_pos(); + end + else begin + `uvm_fatal(msg_id, $sformatf("ptr %0s is not of type uvm_reg or uvm_reg_field", + ptr.get_full_name())) + end + return result; + endfunction : decode_csr_or_field + + // mask and shift data to extract the value specific to that supplied field + function automatic uvm_reg_data_t get_field_val(uvm_reg_field field, + uvm_reg_data_t value); + uvm_reg_data_t mask = (1 << field.get_n_bits()) - 1; + uint shift = field.get_lsb_pos(); + get_field_val = (value >> shift) & mask; + endfunction + + // get updated reg value by using new specific field value + function automatic uvm_reg_data_t get_csr_val_with_updated_field(uvm_reg_field field, + uvm_reg_data_t csr_value, + uvm_reg_data_t field_value); + uvm_reg_data_t mask = (1 << field.get_n_bits()) - 1; + uint shift = field.get_lsb_pos(); + csr_value = csr_value & ~(mask << shift) | ((mask & field_value) << shift); + return csr_value; + endfunction + + // wait until current csr op is complete + task automatic csr_wait(input uvm_reg csr); + `uvm_info(msg_id, $sformatf("%0s: wait_busy: %0b", + csr.get_full_name(), csr.m_is_busy), UVM_HIGH) + wait(csr.m_is_busy == 1'b0); + `uvm_info(msg_id, $sformatf("%0s: done wait_busy: %0b", + csr.get_full_name(), csr.m_is_busy), UVM_HIGH) + endtask + + task automatic csr_update(input uvm_reg csr, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = 
UVM_DEFAULT_PATH, + input bit blocking = default_csr_blocking, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + if (blocking) begin + csr_update_sub(csr, check, path, timeout_ns, map); + end else begin + fork + csr_update_sub(csr, check, path, timeout_ns, map); + join_none + // Add #0 to ensure that this thread starts executing before any subsequent call + #0; + end + endtask + + // subroutine of csr_update, don't use it directly + task automatic csr_update_sub(input uvm_reg csr, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + fork + begin : isolation_fork + uvm_status_e status; + string msg_id = {csr_utils_pkg::msg_id, "::csr_update"}; + + fork + begin + increment_outstanding_access(); + csr.update(.status(status), .path(path), .map(map), .prior(100)); + if (check == UVM_CHECK) begin + `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id) + end + decrement_outstanding_access(); + end + begin + wait_timeout(timeout_ns, msg_id, + $sformatf("Timeout waiting to csr_update %0s (addr=0x%0h)", + csr.get_full_name(), csr.get_address())); + end + join_any + disable fork; + end : isolation_fork + join + endtask + + task automatic csr_wr(input uvm_reg csr, + input uvm_reg_data_t value, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input bit blocking = default_csr_blocking, + input uint timeout_ns = default_timeout_ns, + input bit predict = 0, + input uvm_reg_map map = null); + if (blocking) begin + csr_wr_sub(csr, value, check, path, timeout_ns, map); + if (predict) void'(csr.predict(.value(value), .kind(UVM_PREDICT_WRITE))); + end else begin + fork + begin + csr_wr_sub(csr, value, check, path, timeout_ns, map); + // predict after csr_wr_sub, to ensure predict after enable register overwrite the locked + // registers' access information + if (predict) void'(csr.predict(.value(value), 
.kind(UVM_PREDICT_WRITE))); + end + join_none + // Add #0 to ensure that this thread starts executing before any subsequent call + #0; + end + endtask + + // subroutine of csr_wr, don't use it directly + task automatic csr_wr_sub(input uvm_reg csr, + input uvm_reg_data_t value, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + fork + begin : isolation_fork + uvm_status_e status; + string msg_id = {csr_utils_pkg::msg_id, "::csr_wr"}; + + fork + begin + increment_outstanding_access(); + csr.write(.status(status), .value(value), .path(path), .map(map), .prior(100)); + if (check == UVM_CHECK) begin + `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id) + end + decrement_outstanding_access(); + end + begin + wait_timeout(timeout_ns, msg_id, + $sformatf("Timeout waiting to csr_wr %0s (addr=0x%0h)", + csr.get_full_name(), csr.get_address())); + end + join_any + disable fork; + end : isolation_fork + join + endtask + + task automatic csr_rd(input uvm_object ptr, // accept reg or field + output uvm_reg_data_t value, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input bit blocking = default_csr_blocking, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + if (blocking) begin + csr_rd_sub(ptr, value, check, path, timeout_ns, map); + end else begin + fork + csr_rd_sub(ptr, value, check, path, timeout_ns, map); + join_none + // Add #0 to ensure that this thread starts executing before any subsequent call + #0; + end + endtask + + // subroutine of csr_rd, don't use it directly + task automatic csr_rd_sub(input uvm_object ptr, // accept reg or field + output uvm_reg_data_t value, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + fork + begin : isolation_fork + csr_field_s csr_or_fld; + 
uvm_status_e status; + string msg_id = {csr_utils_pkg::msg_id, "::csr_rd"}; + + fork + begin + increment_outstanding_access(); + csr_or_fld = decode_csr_or_field(ptr); + if (csr_or_fld.field != null) begin + csr_or_fld.field.read(.status(status), .value(value), .path(path), .map(map), + .prior(100)); + end else begin + csr_or_fld.csr.read(.status(status), .value(value), .path(path), .map(map), + .prior(100)); + end + if (check == UVM_CHECK) begin + `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id) + end + decrement_outstanding_access(); + end + begin + wait_timeout(timeout_ns, msg_id, + $sformatf("Timeout waiting to csr_rd %0s (addr=0x%0h)", + ptr.get_full_name(), csr_or_fld.csr.get_address())); + end + join_any + disable fork; + end : isolation_fork + join + endtask + + task automatic csr_rd_check(input uvm_object ptr, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input bit blocking = default_csr_blocking, + input uint timeout_ns = default_timeout_ns, + input bit compare = 1'b1, + input bit compare_vs_ral = 1'b0, + input uvm_reg_data_t compare_mask = '1, + input uvm_reg_data_t compare_value = 0, + input string err_msg = "", + input uvm_reg_map map = null); + fork + begin : isolation_fork + fork + begin + csr_field_s csr_or_fld; + uvm_status_e status; + uvm_reg_data_t obs; + uvm_reg_data_t exp; + string msg_id = {csr_utils_pkg::msg_id, "::csr_rd_check"}; + + increment_outstanding_access(); + csr_or_fld = decode_csr_or_field(ptr); + + csr_rd(.ptr(ptr), .value(obs), .check(check), .path(path), + .blocking(1), .timeout_ns(timeout_ns), .map(map)); + + // get mirrored value after read to make sure the read reg access is updated + if (csr_or_fld.field != null) begin + exp = csr_or_fld.field.get_mirrored_value(); + end else begin + exp = csr_or_fld.csr.get_mirrored_value(); + end + if (compare && !under_reset) begin + obs = obs & compare_mask; + exp = (compare_vs_ral ? 
exp : compare_value) & compare_mask; + `DV_CHECK_EQ(obs, exp, {"Regname: ", ptr.get_full_name(), " ", err_msg}, + error, msg_id) + end + decrement_outstanding_access(); + end + join_none + if (blocking) wait fork; + // Add #0 to ensure that this thread starts executing before any subsequent call + else #0; + end : isolation_fork + join + endtask + + // task to read all csrs and check against ral expected value. Mainly used after reset + task automatic read_and_check_all_csrs(input uvm_reg_block ral); + uvm_reg ral_csrs[$]; + ral.get_registers(ral_csrs); + ral_csrs.shuffle(); + + foreach (ral_csrs[i]) csr_rd_check(.ptr(ral_csrs[i]), .compare_vs_ral(1)); + endtask + + // poll a csr or csr field continuously until it reads the expected value. + task automatic csr_spinwait(input uvm_object ptr, + input uvm_reg_data_t exp_data, + input uvm_check_e check = UVM_CHECK, + input uvm_path_e path = UVM_DEFAULT_PATH, + input uvm_reg_map map = null, + input uint spinwait_delay_ns = 0, + input uint timeout_ns = default_spinwait_timeout_ns, + input compare_op_e compare_op = CompareOpEq, + input uvm_verbosity verbosity = UVM_HIGH); + fork + begin : isolation_fork + csr_field_s csr_or_fld; + uvm_reg_data_t read_data; + string msg_id = {csr_utils_pkg::msg_id, "::csr_spinwait"}; + + csr_or_fld = decode_csr_or_field(ptr); + fork + while (!under_reset) begin + if (spinwait_delay_ns) #(spinwait_delay_ns * 1ns); + csr_rd(.ptr(ptr), .value(read_data), .check(check), .path(path), + .blocking(1), .map(map)); + `uvm_info(msg_id, $sformatf("ptr %0s == 0x%0h", + ptr.get_full_name(), read_data), verbosity) + case (compare_op) + CompareOpEq: if (read_data == exp_data) break; + CompareOpCaseEq: if (read_data === exp_data) break; + CompareOpNe: if (read_data != exp_data) break; + CompareOpCaseNe: if (read_data !== exp_data) break; + CompareOpGt: if (read_data > exp_data) break; + CompareOpGe: if (read_data >= exp_data) break; + CompareOpLt: if (read_data < exp_data) break; + CompareOpLe: if 
(read_data <= exp_data) break; + default: begin + `uvm_fatal(ptr.get_full_name(), $sformatf("invalid operator:%0s", compare_op)) + end + endcase + end + begin + wait_timeout(timeout_ns, msg_id, $sformatf("timeout %0s (addr=0x%0h) == 0x%0h", + ptr.get_full_name(), csr_or_fld.csr.get_address(), exp_data)); + end + join_any + disable fork; + end : isolation_fork + join + endtask + + task automatic mem_rd(input uvm_mem ptr, + input int offset, + output bit[31:0] data, + input uvm_check_e check = UVM_CHECK, + input bit blocking = default_csr_blocking, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + if (blocking) begin + mem_rd_sub(ptr, offset, data, check, timeout_ns, map); + end else begin + fork + mem_rd_sub(ptr, offset, data, check, timeout_ns, map); + join_none + // Add #0 to ensure that this thread starts executing before any subsequent call + #0; + end + endtask : mem_rd + + task automatic mem_rd_sub(input uvm_mem ptr, + input int offset, + output bit[31:0] data, + input uvm_check_e check = UVM_CHECK, + input uint timeout_ns = default_timeout_ns, + input uvm_reg_map map = null); + fork + begin : isolating_fork + uvm_status_e status; + string msg_id = {csr_utils_pkg::msg_id, "::mem_rd"}; + + fork + begin + increment_outstanding_access(); + ptr.read(.status(status), .offset(offset), .value(data), .map(map), .prior(100)); + if (check == UVM_CHECK) begin + `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id) + end + decrement_outstanding_access(); + end + begin : mem_rd_timeout + wait_timeout(timeout_ns, msg_id, + $sformatf("Timeout waiting to csr_rd %0s (addr=0x%0h)", + ptr.get_full_name(), offset)); + end + join_any + disable fork; + end : isolating_fork + join + endtask : mem_rd_sub + + task automatic mem_wr(input uvm_mem ptr, + input int offset, + input bit[31:0] data, + input bit blocking = default_csr_blocking, + input uint timeout_ns = default_timeout_ns, + input uvm_check_e check = UVM_CHECK, + input uvm_reg_map map = null); + if 
(blocking) begin + mem_wr_sub(ptr, offset, data, timeout_ns, check, map); + end else begin + fork + mem_wr_sub(ptr, offset, data, timeout_ns, check, map); + join_none + // Add #0 to ensure that this thread starts executing before any subsequent call + #0; + end + endtask : mem_wr + + task automatic mem_wr_sub(input uvm_mem ptr, + input int offset, + input bit[31:0] data, + input uint timeout_ns = default_timeout_ns, + input uvm_check_e check = UVM_CHECK, + input uvm_reg_map map = null); + fork + begin : isolation_fork + uvm_status_e status; + string msg_id = {csr_utils_pkg::msg_id, "::mem_wr"}; + + fork + begin + increment_outstanding_access(); + ptr.write(.status(status), .offset(offset), .value(data), .map(map), .prior(100)); + if (check == UVM_CHECK) begin + `DV_CHECK_EQ(status, UVM_IS_OK, "", error, msg_id) + end + decrement_outstanding_access(); + end + begin + wait_timeout(timeout_ns, msg_id, + $sformatf("Timeout waiting to csr_wr %0s (addr=0x%0h)", + ptr.get_full_name(), offset)); + end + join_any + disable fork; + end : isolation_fork + join + endtask : mem_wr_sub + + `include "csr_excl_item.sv" + + // Fields could be excluded from writes & reads - This function zeros out the excluded fields + function automatic uvm_reg_data_t get_mask_excl_fields(uvm_reg csr, + csr_excl_type_e csr_excl_type, + csr_test_type_e csr_test_type, + csr_excl_item m_csr_excl_item); + uvm_reg_field flds[$]; + csr.get_fields(flds); + get_mask_excl_fields = '1; + foreach (flds[i]) begin + if (m_csr_excl_item.is_excl(flds[i], csr_excl_type, csr_test_type)) begin + csr_field_s fld_params = decode_csr_or_field(flds[i]); + `uvm_info(msg_id, $sformatf("Skipping field %0s due to %0s exclusion", + flds[i].get_full_name(), csr_excl_type.name()), UVM_MEDIUM) + get_mask_excl_fields &= ~(fld_params.mask << fld_params.shift); + end + end + endfunction + + // sources + `include "csr_seq_lib.sv" + +endpackage diff --git a/vendor/lowrisc_ip/dv_lib/README.md b/vendor/lowrisc_ip/dv_lib/README.md new 
file mode 100644 index 00000000..a67a839f --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/README.md @@ -0,0 +1,56 @@ +--- +title: "DV Library Classes" +--- + +# DV library classes + +## Overview +The DV library classes form the base layer / framework for constructing UVM +testbenches. These classes provide features (settings, methods, hooks and other +constructs used in verification) that are generic enough to be reused across +all testbenches. + +In this doc, we will capture some of the most salient / frequently used features +in extended classes. These classes are being updated frequently. So, for a more +detailed understanding, please read the class definitions directly. + +The DV library classes fall into 3 categories - UVM RAL (register abstraction +layer), UVM agent, and UVM environment extensions. + +### UVM RAL extensions +The RAL model generated using the [reggen]({{< relref "util/reggen/README.md" >}}) tool +extend from these classes. These themselves extend from the corresponding RAL +classes provided in UVM. + +#### `dv_base_reg_field` +Currently, this class does not provide any additional features. One of the +features planned for future is setting exclusion tags at the field level for the +CSR suite of tests that will be extracted automatically from the Hjson-based +IP CSR specification. + +#### `dv_base_reg` +This class provides the following functions to support verification: +* `gen_n_used_bits()`: This function returns the actual number of bits used in + the CSR (sum of all available field widths). +* `get_msb_pos()`: This function returns the MSB bit position of all available + fields. CSR either ends at this bit (bit \`TL_DW - 1) or has reserved / invalid + bits beyond this bit. + +#### `dv_base_reg_block` +* ` build(uvm_reg_addr_t base_addr)`: This function is implemented as a pseudo + pure virtual function (returns a fatal error if called directly). It is used + for building the complete RAL model. 
For a polymorphic approach, the DV user + can use this class handle to create the extended (IP specific) class instance + and call this function to build the actual RAL model. This is exactly how it + is done in [dv_base_env_cfg](#dv_base_env_cfg). + +#### `dv_base_reg_map` +Currently, this class does not provide any additional features. Having this +extension provides an opportunity to add common features in future. + +### UVM Agent extensions +TODO + +### UVM Environment extensions +TODO + diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_agent.sv b/vendor/lowrisc_ip/dv_lib/dv_base_agent.sv new file mode 100644 index 00000000..9e70ea59 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_agent.sv @@ -0,0 +1,60 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_agent #(type CFG_T = dv_base_agent_cfg, + type DRIVER_T = dv_base_driver, + type HOST_DRIVER_T = DRIVER_T, + type DEVICE_DRIVER_T = DRIVER_T, + type SEQUENCER_T = dv_base_sequencer, + type MONITOR_T = dv_base_monitor, + type COV_T = dv_base_agent_cov) extends uvm_agent; + + `uvm_component_param_utils(dv_base_agent #(CFG_T, DRIVER_T, HOST_DRIVER_T, DEVICE_DRIVER_T, + SEQUENCER_T, MONITOR_T, COV_T)) + + CFG_T cfg; + COV_T cov; + DRIVER_T driver; + SEQUENCER_T sequencer; + MONITOR_T monitor; + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + // get CFG_T object from uvm_config_db + if (!uvm_config_db#(CFG_T)::get(this, "", "cfg", cfg)) begin + `uvm_fatal(`gfn, $sformatf("failed to get %s from uvm_config_db", cfg.get_type_name())) + end + `uvm_info(`gfn, $sformatf("\n%0s", cfg.sprint()), UVM_HIGH) + + // create components + if (cfg.en_cov) begin + cov = COV_T ::type_id::create("cov", this); + cov.cfg = cfg; + end + + monitor = MONITOR_T::type_id::create("monitor", this); + monitor.cfg = cfg; + monitor.cov = cov; + + if (cfg.is_active) begin + 
sequencer = SEQUENCER_T::type_id::create("sequencer", this); + sequencer.cfg = cfg; + + if (cfg.if_mode == Host) driver = HOST_DRIVER_T::type_id::create("driver", this); + else driver = DEVICE_DRIVER_T::type_id::create("driver", this); + driver.cfg = cfg; + end + endfunction + + function void connect_phase(uvm_phase phase); + super.connect_phase(phase); + if (cfg.is_active) begin + driver.seq_item_port.connect(sequencer.seq_item_export); + end + endfunction + +endclass + diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_agent_cfg.sv b/vendor/lowrisc_ip/dv_lib/dv_base_agent_cfg.sv new file mode 100644 index 00000000..830f388e --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_agent_cfg.sv @@ -0,0 +1,20 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_agent_cfg extends uvm_object; + + // agent cfg knobs + bit is_active = 1'b1; // active driver or passive monitor + bit en_cov = 1'b1; // enable coverage + if_mode_e if_mode; // interface mode - Host or Device + + `uvm_object_utils_begin(dv_base_agent_cfg) + `uvm_field_int (is_active, UVM_DEFAULT) + `uvm_field_int (en_cov, UVM_DEFAULT) + `uvm_field_enum(if_mode_e, if_mode, UVM_DEFAULT) + `uvm_object_utils_end + + `uvm_object_new + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_agent_cov.sv b/vendor/lowrisc_ip/dv_lib/dv_base_agent_cov.sv new file mode 100644 index 00000000..545f6764 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_agent_cov.sv @@ -0,0 +1,12 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class dv_base_agent_cov #(type CFG_T = dv_base_agent_cfg) extends uvm_component; + `uvm_component_param_utils(dv_base_agent_cov #(CFG_T)) + + CFG_T cfg; + + `uvm_component_new + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_driver.sv b/vendor/lowrisc_ip/dv_lib/dv_base_driver.sv new file mode 100644 index 00000000..a6852358 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_driver.sv @@ -0,0 +1,32 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_driver #(type ITEM_T = uvm_sequence_item, + type CFG_T = dv_base_agent_cfg) extends uvm_driver #(ITEM_T); + `uvm_component_param_utils(dv_base_driver #()) + + bit under_reset; + CFG_T cfg; + + `uvm_component_new + + virtual task run_phase(uvm_phase phase); + fork + reset_signals(); + get_and_drive(); + join + endtask + + // reset signals + virtual task reset_signals(); + `uvm_fatal(`gfn, "this is implemented as pure virtual task - please extend") + endtask + + // drive trans received from sequencer + virtual task get_and_drive(); + `uvm_fatal(`gfn, "this is implemented as pure virtual task - please extend") + endtask + +endclass + diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_env.sv b/vendor/lowrisc_ip/dv_lib/dv_base_env.sv new file mode 100644 index 00000000..f616446b --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_env.sv @@ -0,0 +1,60 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class dv_base_env #(type CFG_T = dv_base_env_cfg, + type VIRTUAL_SEQUENCER_T = dv_base_virtual_sequencer, + type SCOREBOARD_T = dv_base_scoreboard, + type COV_T = dv_base_env_cov) extends uvm_env; + `uvm_component_param_utils(dv_base_env #(CFG_T, VIRTUAL_SEQUENCER_T, SCOREBOARD_T, COV_T)) + + CFG_T cfg; + VIRTUAL_SEQUENCER_T virtual_sequencer; + SCOREBOARD_T scoreboard; + COV_T cov; + + `uvm_component_new + + virtual function void build_phase(uvm_phase phase); + super.build_phase(phase); + // get dv_base_env_cfg object from uvm_config_db + if (!uvm_config_db#(CFG_T)::get(this, "", "cfg", cfg)) begin + `uvm_fatal(`gfn, $sformatf("failed to get %s from uvm_config_db", cfg.get_type_name())) + end + + // get vifs + if (!uvm_config_db#(virtual clk_rst_if)::get(this, "", "clk_rst_vif", cfg.clk_rst_vif)) begin + `uvm_fatal(get_full_name(), "failed to get clk_rst_if from uvm_config_db") + end + cfg.clk_rst_vif.set_freq_mhz(cfg.clk_freq_mhz); + + // create components + if (cfg.en_cov) begin + cov = COV_T::type_id::create("cov", this); + cov.cfg = cfg; + end + + if (cfg.is_active) begin + virtual_sequencer = VIRTUAL_SEQUENCER_T::type_id::create("virtual_sequencer", this); + virtual_sequencer.cfg = cfg; + virtual_sequencer.cov = cov; + end + + // scb also monitors the reset and call cfg.reset_asserted/reset_deasserted for reset + scoreboard = SCOREBOARD_T::type_id::create("scoreboard", this); + scoreboard.cfg = cfg; + scoreboard.cov = cov; + endfunction + + virtual function void end_of_elaboration_phase(uvm_phase phase); + super.end_of_elaboration_phase(phase); + if (cfg.has_ral) begin + // Lock the ral model + cfg.ral.lock_model(); + // Get list of valid csr addresses (useful in seq to randomize addr as well as in scb checks) + get_csr_addrs(cfg.ral, cfg.csr_addrs); + get_mem_addr_ranges(cfg.ral, cfg.mem_ranges); + end + endfunction : end_of_elaboration_phase + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_env_cfg.sv 
b/vendor/lowrisc_ip/dv_lib/dv_base_env_cfg.sv new file mode 100644 index 00000000..2b74cf5c --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_env_cfg.sv @@ -0,0 +1,94 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_env_cfg #(type RAL_T = dv_base_reg_block) extends uvm_object; + + bit is_active = 1; + bit en_scb = 1; // can be changed at run-time + bit en_cov = 1; + bit has_ral = 1; + bit under_reset = 0; + + // bit to configure all uvcs with zero delays to create high bw test + rand bit zero_delays; + + // reg model & q of valid csr addresses + RAL_T ral; + bit [TL_AW-1:0] csr_addrs[$]; + addr_range_t mem_ranges[$]; + // mem access support, if not enabled, will trigger error + bit en_mem_byte_write = 0; + bit en_mem_read = 1; + + // ral base address and size + bit [TL_AW-1:0] csr_base_addr; // base address where csr map begins + bit [TL_AW:0] csr_addr_map_size; // csr addr region allocated to the ip, max: 1 << TL_AW + + // clk_rst_if & freq + virtual clk_rst_if clk_rst_vif; + rand clk_freq_mhz_e clk_freq_mhz; + + // set zero_delays 40% of the time + constraint zero_delays_c { + zero_delays dist {1'b0 := 6, 1'b1 := 4}; + } + + `uvm_object_param_utils_begin(dv_base_env_cfg #(RAL_T)) + `uvm_field_int (is_active, UVM_DEFAULT) + `uvm_field_int (en_scb, UVM_DEFAULT) + `uvm_field_int (en_cov, UVM_DEFAULT) + `uvm_field_int (zero_delays, UVM_DEFAULT) + `uvm_field_int (csr_base_addr, UVM_DEFAULT) + `uvm_field_int (csr_addr_map_size, UVM_DEFAULT) + `uvm_field_enum (clk_freq_mhz_e, clk_freq_mhz, UVM_DEFAULT) + `uvm_object_utils_end + + `uvm_object_new + + virtual function void initialize(bit [TL_AW-1:0] csr_base_addr = '1); + initialize_csr_addr_map_size(); + `DV_CHECK_NE_FATAL(csr_addr_map_size, 0, "csr_addr_map_size can't be 0") + // use locally randomized csr base address, unless provided as arg to this function + if (csr_base_addr != '1) begin + 
bit is_aligned; + this.csr_base_addr = csr_base_addr; + // check alignment + is_aligned = ~|(this.csr_base_addr & (this.csr_addr_map_size - 1)); + `DV_CHECK_EQ_FATAL(is_aligned, 1'b1) + end else begin + // base address needs to be aligned to csr_addr_map_size + `DV_CHECK_STD_RANDOMIZE_WITH_FATAL(csr_base_addr, + ~|(csr_base_addr & (csr_addr_map_size - 1));) + this.csr_base_addr = csr_base_addr; + end + // build the ral model + if (has_ral) begin + ral = RAL_T::type_id::create("ral"); + ral.build(this.csr_base_addr, null); + apply_ral_fixes(); + end + endfunction + + // This function must be implemented in extended class to + // initialize value of csr_addr_map_size member + virtual function void initialize_csr_addr_map_size(); + `uvm_fatal(`gfn, "This task must be implemented in the extended class!") + endfunction : initialize_csr_addr_map_size + + // ral flow is limited in terms of setting correct field access policies and reset values + // We apply those fixes here - please note these fixes need to be reflected in the scoreboard + protected virtual function void apply_ral_fixes(); + // fix access policies & reset values + endfunction + + virtual function void reset_asserted(); + this.under_reset = 1; + csr_utils_pkg::reset_asserted(); + endfunction + + virtual function void reset_deasserted(); + this.under_reset = 0; + csr_utils_pkg::reset_deasserted(); + endfunction +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_env_cov.sv b/vendor/lowrisc_ip/dv_lib/dv_base_env_cov.sv new file mode 100644 index 00000000..d5934628 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_env_cov.sv @@ -0,0 +1,42 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +// TODO - We are enclosing generic covergroups inside class so that we can +// take avoid tool limitation of not allowing arrays of covergroup +// Refer to Issue#375 for more details +class dv_base_generic_cov_obj; + + // Covergroup: bit_toggle_cg + // Generic covergroup definition + covergroup bit_toggle_cg(string name, bit toggle_cov_en = 1) with function sample(bit value); + option.per_instance = 1; + option.name = name; + cp_value: coverpoint value; + cp_transitions: coverpoint value { + option.weight = toggle_cov_en; + bins rising = (0 => 1); + bins falling = (1 => 0); + } + endgroup : bit_toggle_cg + + // Function: new + function new(string name = "dv_base_generic_cov_obj", bit toggle_cov_en = 1); + bit_toggle_cg = new(name, toggle_cov_en); + endfunction : new + + // Function: sample + function void sample(bit value); + bit_toggle_cg.sample(value); + endfunction : sample + +endclass : dv_base_generic_cov_obj + +class dv_base_env_cov #(type CFG_T = dv_base_env_cfg) extends uvm_component; + `uvm_component_param_utils(dv_base_env_cov #(CFG_T)) + + CFG_T cfg; + + `uvm_component_new + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_mem.sv b/vendor/lowrisc_ip/dv_lib/dv_base_mem.sv new file mode 100644 index 00000000..5d4db40c --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_mem.sv @@ -0,0 +1,16 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// base register reg class which will be used to generate the reg mem +class dv_base_mem extends uvm_mem; + + function new(string name, + longint unsigned size, + int unsigned n_bits, + string access = "RW", + int has_coverage = UVM_NO_COVERAGE); + super.new(name, size, n_bits, access, has_coverage); + endfunction : new + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_monitor.sv b/vendor/lowrisc_ip/dv_lib/dv_base_monitor.sv new file mode 100644 index 00000000..9ec06742 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_monitor.sv @@ -0,0 +1,35 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_monitor #(type ITEM_T = uvm_sequence_item, + type CFG_T = dv_base_agent_cfg, + type COV_T = dv_base_agent_cov) extends uvm_monitor; + `uvm_component_param_utils(dv_base_monitor #(ITEM_T, CFG_T, COV_T)) + + CFG_T cfg; + COV_T cov; + + // Analysis port for the collected transfer. + uvm_analysis_port #(ITEM_T) analysis_port; + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + analysis_port = new("analysis_port", this); + endfunction + + virtual task run_phase(uvm_phase phase); + fork + collect_trans(phase); + join + endtask + + // collect transactions forever + virtual protected task collect_trans(uvm_phase phase); + `uvm_fatal(`gfn, "this method is not supposed to be called directly!") + endtask + +endclass + diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_reg.sv b/vendor/lowrisc_ip/dv_lib/dv_base_reg.sv new file mode 100644 index 00000000..43db3096 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_reg.sv @@ -0,0 +1,69 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// base register class which will be used to generate the reg +class dv_base_reg extends uvm_reg; + + function new(string name = "", + int unsigned n_bits, + int has_coverage); + super.new(name, n_bits, has_coverage); + endfunction : new + + + local dv_base_reg locked_regs[$]; + + function void get_dv_base_reg_fields(ref dv_base_reg_field dv_fields[$]); + uvm_reg_field ral_fields[$]; + get_fields(ral_fields); + foreach (ral_fields[i]) `downcast(dv_fields[i], ral_fields[i]) + endfunction + + // get_n_bits will return number of all the bits in the csr + // while this function will return actual number of bits used in reg field + function uint get_n_used_bits(); + uvm_reg_field fields[$]; + get_fields(fields); + foreach (fields[i]) get_n_used_bits += fields[i].get_n_bits(); + endfunction + + // loop all the fields to find the msb position of this reg + function uint get_msb_pos(); + uvm_reg_field fields[$]; + get_fields(fields); + foreach (fields[i]) begin + uint field_msb_pos = fields[i].get_lsb_pos() + fields[i].get_n_bits() - 1; + if (field_msb_pos > get_msb_pos) get_msb_pos = field_msb_pos; + end + endfunction + + // if the register is an enable reg, it will add controlled registers in the queue + function void add_locked_reg(dv_base_reg locked_reg); + locked_regs.push_back(locked_reg); + endfunction + + function bit is_enable_reg(); + return (locked_regs.size() > 0); + endfunction + + // if enable register is set to 1, the locked registers will be set to RO access + // once enable register is reset to 0, the locked registers will be set back to original access + function void set_locked_regs_access(string access = "original_access"); + foreach (locked_regs[i]) begin + dv_base_reg_field locked_fields[$]; + locked_regs[i].get_dv_base_reg_fields(locked_fields); + foreach (locked_fields[i]) locked_fields[i].set_locked_fields_access(access); + end + endfunction + + function void get_locked_regs(ref dv_base_reg 
locked_regs_q[$]); + locked_regs_q = locked_regs; + endfunction + + // post_write callback to handle reg enables + virtual task post_write(uvm_reg_item rw); + if (is_enable_reg() && (rw.value[0] & 1)) set_locked_regs_access("RO"); + endtask + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_reg_block.sv b/vendor/lowrisc_ip/dv_lib/dv_base_reg_block.sv new file mode 100644 index 00000000..2d1caa5e --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_reg_block.sv @@ -0,0 +1,56 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// base register block class which will be used to generate the reg blocks +class dv_base_reg_block extends uvm_reg_block; + `uvm_object_utils(dv_base_reg_block) + + csr_excl_item csr_excl; + + function new (string name = "", int has_coverage = UVM_NO_COVERAGE); + super.new(name, has_coverage); + endfunction + + // provide build function to supply base addr + virtual function void build(uvm_reg_addr_t base_addr, csr_utils_pkg::csr_excl_item csr_excl); + `uvm_fatal(`gfn, "this method is not supposed to be called directly!") + endfunction + + function void get_dv_base_reg_blocks(ref dv_base_reg_block blks[$]); + uvm_reg_block uvm_blks[$]; + this.get_blocks(uvm_blks); + foreach (uvm_blks[i]) `downcast(blks[i], uvm_blks[i]) + endfunction + + function void get_dv_base_regs(ref dv_base_reg dv_regs[$]); + uvm_reg ral_regs[$]; + this.get_registers(ral_regs); + foreach (ral_regs[i]) `downcast(dv_regs[i], ral_regs[i]) + endfunction + + function void get_enable_regs(ref dv_base_reg enable_regs[$]); + dv_base_reg_block blks[$]; + this.get_dv_base_reg_blocks(blks); + if (blks.size() == 0) begin + dv_base_reg all_regs[$]; + this.get_dv_base_regs(all_regs); + foreach (all_regs[i]) begin + if (all_regs[i].is_enable_reg()) enable_regs.push_back(all_regs[i]); + end + return; + end else begin + foreach (blks[i]) blks[i].get_enable_regs(enable_regs); 
+ end + endfunction + + // override RAL's reset function to support enable registers + // when reset issued - the locked registers' access will be reset to original access + virtual function void reset(string kind = "HARD"); + dv_base_reg enable_regs[$]; + super.reset(kind); + get_enable_regs(enable_regs); + foreach (enable_regs[i]) enable_regs[i].set_locked_regs_access(); + endfunction + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_reg_field.sv b/vendor/lowrisc_ip/dv_lib/dv_base_reg_field.sv new file mode 100644 index 00000000..021d1edf --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_reg_field.sv @@ -0,0 +1,42 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// base register reg class which will be used to generate the reg field +class dv_base_reg_field extends uvm_reg_field; + local string m_original_access; + + `uvm_object_utils(dv_base_reg_field) + `uvm_object_new + + // when use UVM_PREDICT_WRITE and the CSR access is WO, this function will return the default + // val of the register, rather than the written value + virtual function uvm_reg_data_t XpredictX(uvm_reg_data_t cur_val, + uvm_reg_data_t wr_val, + uvm_reg_map map); + + if (get_access(map) == "WO") return cur_val; + else return super.XpredictX(cur_val, wr_val, map); + endfunction + + virtual function string get_original_access(); + return m_original_access; + endfunction + + virtual function void set_original_access(string access); + if (m_original_access == "") begin + m_original_access = access; + end else begin + `uvm_fatal(`gfn, "register original access can only be written once") + end + endfunction + + virtual function void set_locked_fields_access(string access = "original_access"); + case (access) + "RO": void'(this.set_access(access)); + "original_access": void'(this.set_access(m_original_access)); + default: `uvm_fatal(`gfn, $sformatf("attempt to set access to %s", 
access)) + endcase + endfunction + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_reg_map.sv b/vendor/lowrisc_ip/dv_lib/dv_base_reg_map.sv new file mode 100644 index 00000000..cce6d6ec --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_reg_map.sv @@ -0,0 +1,9 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// base register reg class which will be used to generate the reg map +class dv_base_reg_map extends uvm_reg_map; + `uvm_object_utils(dv_base_reg_map) + `uvm_object_new +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_scoreboard.sv b/vendor/lowrisc_ip/dv_lib/dv_base_scoreboard.sv new file mode 100644 index 00000000..85fd20bf --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_scoreboard.sv @@ -0,0 +1,80 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_scoreboard #(type RAL_T = dv_base_reg_block, + type CFG_T = dv_base_env_cfg, + type COV_T = dv_base_env_cov) extends uvm_component; + `uvm_component_param_utils(dv_base_scoreboard #(RAL_T, CFG_T, COV_T)) + + CFG_T cfg; + RAL_T ral; + COV_T cov; + + bit obj_raised = 1'b0; + bit under_pre_abort = 1'b0; + + `uvm_component_new + + virtual function void build_phase(uvm_phase phase); + super.build_phase(phase); + ral = cfg.ral; + endfunction + + virtual task run_phase(uvm_phase phase); + super.run_phase(phase); + fork + monitor_reset(); + join_none + endtask + + virtual task monitor_reset(); + forever begin + if (!cfg.clk_rst_vif.rst_n) begin + `uvm_info(`gfn, "reset occurred", UVM_HIGH) + cfg.reset_asserted(); + @(posedge cfg.clk_rst_vif.rst_n); + reset(); + cfg.reset_deasserted(); + `uvm_info(`gfn, "out of reset", UVM_HIGH) + end + else begin + // wait for a change to rst_n + @(cfg.clk_rst_vif.rst_n); + end + end + endtask + + // raise / drop objections based on 
certain events + virtual function void process_objections(bit raise); + if (raise && !obj_raised) begin + m_current_phase.raise_objection(this, $sformatf("%s objection raised", `gfn)); + obj_raised = 1'b1; + end + else if (!raise && obj_raised) begin + m_current_phase.drop_objection(this, $sformatf("%s objection dropped", `gfn)); + obj_raised = 1'b0; + end + endfunction + + virtual function void reset(string kind = "HARD"); + // reset the ral model + if (cfg.has_ral) ral.reset(kind); + endfunction + + virtual function void pre_abort(); + super.pre_abort(); + // use under_pre_abort flag to prevent deadloop described below: + // when fatal_err occurred, it will skip check_phase. We add the additional check_phase call + // here to help debugging. But if inside the check_phase there are UVM_ERRORs, and the err cnt + // is larger than max_err_cnt, then check_phase will call pre_abort again. This will end up + // creating a deadloop. + if (has_uvm_fatal_occurred() && !under_pre_abort) begin + under_pre_abort = 1; + check_phase(m_current_phase); + under_pre_abort = 0; + end + endfunction : pre_abort + +endclass + diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_seq.sv b/vendor/lowrisc_ip/dv_lib/dv_base_seq.sv new file mode 100644 index 00000000..8c6aca97 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_seq.sv @@ -0,0 +1,25 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class dv_base_seq #(type REQ = uvm_sequence_item, + type RSP = REQ, + type CFG_T = dv_base_agent_cfg, + type SEQUENCER_T = dv_base_sequencer) extends uvm_sequence#(REQ, RSP); + `uvm_object_param_utils(dv_base_seq #(REQ, RSP, CFG_T, SEQUENCER_T)) + `uvm_declare_p_sequencer(SEQUENCER_T) + + CFG_T cfg; + + `uvm_object_new + + task pre_start(); + super.pre_start(); + cfg = p_sequencer.cfg; + endtask + + task body(); + `uvm_fatal(`gtn, "Need to override this when you extend from this class!") + endtask : body + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_sequencer.sv b/vendor/lowrisc_ip/dv_lib/dv_base_sequencer.sv new file mode 100644 index 00000000..fbd69276 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_sequencer.sv @@ -0,0 +1,13 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_sequencer #(type ITEM_T = uvm_sequence_item, + type CFG_T = dv_base_agent_cfg) extends uvm_sequencer #(ITEM_T); + `uvm_component_param_utils(dv_base_sequencer #(ITEM_T, CFG_T)) + + CFG_T cfg; + + `uvm_component_new + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_test.sv b/vendor/lowrisc_ip/dv_lib/dv_base_test.sv new file mode 100644 index 00000000..aa92db95 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_test.sv @@ -0,0 +1,76 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class dv_base_test #(type CFG_T = dv_base_env_cfg, + type ENV_T = dv_base_env) extends uvm_test; + `uvm_component_param_utils(dv_base_test #(CFG_T, ENV_T)) + + ENV_T env; + CFG_T cfg; + bit run_test_seq = 1'b1; + string test_seq_s; + + uint max_quit_count = 1; + uint64 test_timeout_ns = 200_000_000; // 200ms + uint drain_time_ns = 2_000; // 2us + + `uvm_component_new + + virtual function void build_phase(uvm_phase phase); + dv_report_server m_dv_report_server = new(); + uvm_report_server::set_server(m_dv_report_server); + + super.build_phase(phase); + + env = ENV_T::type_id::create("env", this); + cfg = CFG_T::type_id::create("cfg", this); + // don't add args for initialize. Use default value instead + cfg.initialize(); + `DV_CHECK_RANDOMIZE_FATAL(cfg) + uvm_config_db#(CFG_T)::set(this, "env", "cfg", cfg); + + // knob to en/dis scb (enabled by default) + void'($value$plusargs("en_scb=%0b", cfg.en_scb)); + // knob to cfg all agents with zero delays + void'($value$plusargs("zero_delays=%0b", cfg.zero_delays)); + endfunction : build_phase + + virtual function void end_of_elaboration_phase(uvm_phase phase); + super.end_of_elaboration_phase(phase); + void'($value$plusargs("max_quit_count=%0d", max_quit_count)); + set_max_quit_count(max_quit_count); + void'($value$plusargs("test_timeout_ns=%0d", test_timeout_ns)); + uvm_top.set_timeout((test_timeout_ns * 1ns)); + endfunction : end_of_elaboration_phase + + virtual task run_phase(uvm_phase phase); + void'($value$plusargs("drain_time_ns=%0d", drain_time_ns)); + phase.phase_done.set_drain_time(this, (drain_time_ns * 1ns)); + void'($value$plusargs("UVM_TEST_SEQ=%0s", test_seq_s)); + if (run_test_seq) begin + run_seq(test_seq_s, phase); + end + // TODO: add hook for end of test checking + endtask : run_phase + + virtual task run_seq(string test_seq_s, uvm_phase phase); + uvm_sequence test_seq = create_seq_by_name(test_seq_s); + + // provide virtual_sequencer earlier, so we may use the 
p_sequencer in constraint + test_seq.set_sequencer(env.virtual_sequencer); + `DV_CHECK_RANDOMIZE_FATAL(test_seq) + + `uvm_info(`gfn, {"starting vseq ", test_seq_s}, UVM_MEDIUM) + phase.raise_objection(this, $sformatf("%s objection raised", `gn)); + test_seq.start(env.virtual_sequencer); + phase.drop_objection(this, $sformatf("%s objection dropped", `gn)); + phase.phase_done.display_objections(); + `uvm_info(`gfn, {"finished vseq ", test_seq_s}, UVM_MEDIUM) + endtask + + // TODO: add default report_phase implementation + +endclass : dv_base_test + + diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_virtual_sequencer.sv b/vendor/lowrisc_ip/dv_lib/dv_base_virtual_sequencer.sv new file mode 100644 index 00000000..3fe74281 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_virtual_sequencer.sv @@ -0,0 +1,14 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class dv_base_virtual_sequencer #(type CFG_T = dv_base_env_cfg, + type COV_T = dv_base_env_cov) extends uvm_sequencer; + `uvm_component_param_utils(dv_base_virtual_sequencer #(CFG_T, COV_T)) + + CFG_T cfg; + COV_T cov; + + `uvm_component_new + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_base_vseq.sv b/vendor/lowrisc_ip/dv_lib/dv_base_vseq.sv new file mode 100644 index 00000000..85aa5df1 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_base_vseq.sv @@ -0,0 +1,187 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class dv_base_vseq #(type RAL_T = dv_base_reg_block, + type CFG_T = dv_base_env_cfg, + type COV_T = dv_base_env_cov, + type VIRTUAL_SEQUENCER_T = dv_base_virtual_sequencer) extends uvm_sequence; + `uvm_object_param_utils(dv_base_vseq #(RAL_T, CFG_T, COV_T, VIRTUAL_SEQUENCER_T)) + `uvm_declare_p_sequencer(VIRTUAL_SEQUENCER_T) + + // number of iterations to run the test seq - please override constraint in extended vseq + // randomization for this is disabled in pre_start since we don't want to re-randomize it again + rand uint num_trans; + + constraint num_trans_c { + num_trans inside {[1:20]}; + } + + // handles for ease of op + CFG_T cfg; + RAL_T ral; + COV_T cov; + + // knobs to enable pre_start routines + bit do_dut_init = 1'b1; + bit do_apply_reset = 1'b1; + bit do_wait_for_reset = 1'b1; + + // knobs to enable post_start routines + bit do_dut_shutdown = 1'b1; + + `uvm_object_new + + task pre_start(); + super.pre_start(); + cfg = p_sequencer.cfg; + cov = p_sequencer.cov; + ral = cfg.ral; + if (do_dut_init) dut_init("HARD"); + num_trans.rand_mode(0); + endtask + + task body(); + `uvm_fatal(`gtn, "Need to override this when you extend from this class!") + endtask : body + + task post_start(); + super.post_start(); + if (do_dut_shutdown) dut_shutdown(); + endtask + + /* + * startup, reset and shutdown related tasks + */ + virtual task dut_init(string reset_kind = "HARD"); + if (do_apply_reset) apply_reset(reset_kind); + else if (do_wait_for_reset) wait_for_reset(reset_kind); + // delay after reset for tl agent check seq_item_port empty + #1ps; + endtask + + virtual task apply_reset(string kind = "HARD"); + if (kind == "HARD") begin + csr_utils_pkg::reset_asserted(); + cfg.clk_rst_vif.apply_reset(); + csr_utils_pkg::reset_deasserted(); + end + if (cfg.has_ral) ral.reset(kind); + endtask + + virtual task wait_for_reset(string reset_kind = "HARD", + bit wait_for_assert = 1, + bit wait_for_deassert = 1); + if (wait_for_assert) 
begin + `uvm_info(`gfn, "waiting for rst_n assertion...", UVM_MEDIUM) + @(negedge cfg.clk_rst_vif.rst_n); + end + if (wait_for_deassert) begin + `uvm_info(`gfn, "waiting for rst_n de-assertion...", UVM_MEDIUM) + @(posedge cfg.clk_rst_vif.rst_n); + end + `uvm_info(`gfn, "wait_for_reset done", UVM_HIGH) + endtask + + // dut shutdown - this is called in post_start if do_dut_shutdown bit is set + virtual task dut_shutdown(); + csr_utils_pkg::wait_no_outstanding_access(); + endtask + + // function to add csr exclusions of the given type using the csr_excl_item item + // arg csr_test_type: this the the type of csr test run - we may want additional exclusions + // depending on what test seq we are running + // arg csr_excl: this is the csr exclusion object that maintains the list of exclusions + // the same object handle is to be passed to csr sequences in csr_seq_lib so that they can query + // those exclusions + virtual function void add_csr_exclusions(string csr_test_type, + csr_excl_item csr_excl, + string scope = "ral"); + `uvm_info(`gfn, "no exclusion item added from this function", UVM_DEBUG) + endfunction + + // TODO: temp support, can delete this once all IPs update their exclusion in hjson + virtual function csr_excl_item add_and_return_csr_excl(string csr_test_type); + add_csr_exclusions(csr_test_type, ral.csr_excl); + ral.csr_excl.print_exclusions(); + return ral.csr_excl; + endfunction + + // wrapper task around run_csr_vseq - the purpose is to be able to call this directly for actual + // csr tests (as opposed to higher level stress test that could also run csr seq as a fork by + // calling run_csr_vseq(..) 
task) + virtual task run_csr_vseq_wrapper(int num_times = 1); + string csr_test_type; + csr_excl_item csr_excl; + + // env needs to have a ral instance + `DV_CHECK_EQ_FATAL(cfg.has_ral, 1'b1) + + // get csr_test_type from plusarg + void'($value$plusargs("csr_%0s", csr_test_type)); + + // create csr exclusions before running the csr seq + csr_excl = add_and_return_csr_excl(csr_test_type); + + // run the csr seq + for (int i = 1; i <= num_times; i++) begin + `uvm_info(`gfn, $sformatf("running csr %0s vseq iteration %0d/%0d", + csr_test_type, i, num_times), UVM_LOW) + run_csr_vseq(.csr_test_type(csr_test_type), .csr_excl(csr_excl)); + end + endtask + + // capture the entire csr seq as a task that can be overridden if desired + // arg csr_test_type: what csr test to run {hw_reset, rw, bit_bash, aliasing} + // arg csr_excl: csr exclusion object - needs to be created and exclusions set before call + // arg num_test_csrs:instead of testing the entire ral model or passing test chunk info via + // plusarg, provide ability to set a random number of csrs to test from higher level sequence + virtual task run_csr_vseq(string csr_test_type = "", + csr_excl_item csr_excl = null, + int num_test_csrs = 0, + bit do_rand_wr_and_reset = 1); + csr_base_seq m_csr_seq; + + // env needs to have a ral instance + `DV_CHECK_EQ_FATAL(cfg.has_ral, 1'b1) + + // check which csr test type + case (csr_test_type) + "hw_reset": csr_base_seq::type_id::set_type_override(csr_hw_reset_seq::get_type()); + "rw" : csr_base_seq::type_id::set_type_override(csr_rw_seq::get_type()); + "bit_bash": csr_base_seq::type_id::set_type_override(csr_bit_bash_seq::get_type()); + "aliasing": csr_base_seq::type_id::set_type_override(csr_aliasing_seq::get_type()); + "mem_walk": csr_base_seq::type_id::set_type_override(csr_mem_walk_seq::get_type()); + default : `uvm_fatal(`gfn, $sformatf("specified opt is invalid: +csr_%0s", csr_test_type)) + endcase + + // if hw_reset test, then write all CSRs first and reset the whole dut 
+ if (csr_test_type == "hw_reset" && do_rand_wr_and_reset) begin + string reset_type = "HARD"; + csr_write_seq m_csr_write_seq; + + // run write-only sequence to randomize the csr values + m_csr_write_seq = csr_write_seq::type_id::create("m_csr_write_seq"); + m_csr_write_seq.models.push_back(ral); + m_csr_write_seq.set_csr_excl_item(csr_excl); + m_csr_write_seq.external_checker = cfg.en_scb; + m_csr_write_seq.start(null); + + // run dut_shutdown before asserting reset + dut_shutdown(); + + // issue reset + void'($value$plusargs("do_reset=%0s", reset_type)); + dut_init(reset_type); + end + + // create base csr seq and pass our ral + m_csr_seq = csr_base_seq::type_id::create("m_csr_seq"); + m_csr_seq.num_test_csrs = num_test_csrs; + m_csr_seq.models.push_back(ral); + m_csr_seq.set_csr_excl_item(csr_excl); + m_csr_seq.external_checker = cfg.en_scb; + m_csr_seq.start(null); + endtask + +endclass diff --git a/vendor/lowrisc_ip/dv_lib/dv_lib.core b/vendor/lowrisc_ip/dv_lib/dv_lib.core new file mode 100644 index 00000000..73c7fe16 --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_lib.core @@ -0,0 +1,42 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:dv_lib" +description: "DV base class UVM library" + +filesets: + files_dv: + depend: + - lowrisc:dv:dv_utils + - lowrisc:dv:csr_utils + files: + - dv_lib_pkg.sv + + - dv_base_reg_field.sv: {is_include_file: true} + - dv_base_reg.sv: {is_include_file: true} + - dv_base_mem.sv: {is_include_file: true} + - dv_base_reg_block.sv: {is_include_file: true} + - dv_base_reg_map.sv: {is_include_file: true} + + - dv_base_agent_cfg.sv: {is_include_file: true} + - dv_base_agent_cov.sv: {is_include_file: true} + - dv_base_monitor.sv: {is_include_file: true} + - dv_base_sequencer.sv: {is_include_file: true} + - dv_base_driver.sv: {is_include_file: true} + - dv_base_agent.sv: {is_include_file: true} + - dv_base_seq.sv: {is_include_file: true} + + - dv_base_env_cfg.sv: {is_include_file: true} + - dv_base_env_cov.sv: {is_include_file: true} + - dv_base_virtual_sequencer.sv: {is_include_file: true} + - dv_base_scoreboard.sv: {is_include_file: true} + - dv_base_env.sv: {is_include_file: true} + - dv_base_vseq.sv: {is_include_file: true} + - dv_base_test.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/dv_lib/dv_lib_pkg.sv b/vendor/lowrisc_ip/dv_lib/dv_lib_pkg.sv new file mode 100644 index 00000000..0036c4ec --- /dev/null +++ b/vendor/lowrisc_ip/dv_lib/dv_lib_pkg.sv @@ -0,0 +1,51 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +package dv_lib_pkg; + // dep packages + import uvm_pkg::*; + import top_pkg::*; + import dv_utils_pkg::*; + import csr_utils_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // package variables + string msg_id = "dv_lib_pkg"; + + // package sources + // base ral + `include "dv_base_reg_field.sv" + `include "dv_base_reg.sv" + `include "dv_base_mem.sv" + `include "dv_base_reg_block.sv" + `include "dv_base_reg_map.sv" + + // base agent + `include "dv_base_agent_cfg.sv" + `include "dv_base_agent_cov.sv" + `include "dv_base_monitor.sv" + `include "dv_base_sequencer.sv" + `include "dv_base_driver.sv" + `include "dv_base_agent.sv" + + // base seq + `include "dv_base_seq.sv" + + // base env + `include "dv_base_env_cfg.sv" + `include "dv_base_env_cov.sv" + `include "dv_base_virtual_sequencer.sv" + `include "dv_base_scoreboard.sv" + `include "dv_base_env.sv" + + // base test vseq + `include "dv_base_vseq.sv" + + // base test + `include "dv_base_test.sv" + +endpackage diff --git a/vendor/lowrisc_ip/dv_utils/README.md b/vendor/lowrisc_ip/dv_utils/README.md new file mode 100644 index 00000000..e69de29b diff --git a/vendor/lowrisc_ip/dv_utils/dv_macros.svh b/vendor/lowrisc_ip/dv_utils/dv_macros.svh new file mode 100644 index 00000000..bf78e0fb --- /dev/null +++ b/vendor/lowrisc_ip/dv_utils/dv_macros.svh @@ -0,0 +1,288 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +// UVM speficic macros +`ifndef gfn + `define gfn get_full_name() +`endif + +`ifndef gtn + `define gtn get_type_name() +`endif + +`ifndef gn + `define gn get_name() +`endif + +`ifndef gmv + `define gmv(csr) csr.get_mirrored_value() +`endif + +// cast base class obj holding extended class handle to extended class handle; +// throw error if cast fails +`ifndef downcast + `define downcast(EXT_, BASE_, MSG_="", SEV_=fatal, ID_=`gfn) \ + if (!$cast(EXT_, BASE_)) begin \ + `uvm_``SEV_(ID_, $sformatf({"Cast failed: base class variable %0s ", \ + "does not hold extended class %0s handle %s"}, \ + `"BASE_`", `"EXT_`", MSG_)) \ + end +`endif + +// Note, UVM provides a macro `uvm_new_func -- which only applies to uvm_components +`ifndef uvm_object_new + `define uvm_object_new \ + function new (string name=""); \ + super.new(name); \ + endfunction : new +`endif + +`ifndef uvm_create_obj + `define uvm_create_obj(_type_name_, _inst_name_) \ + _inst_name_ = _type_name_::type_id::create(`"_inst_name_`"); +`endif + +`ifndef uvm_component_new + `define uvm_component_new \ + function new (string name="", uvm_component parent=null); \ + super.new(name, parent); \ + endfunction : new +`endif + +`ifndef uvm_create_comp + `define uvm_create_comp(_type_name_, _inst_name_) \ + _inst_name_ = _type_name_::type_id::create(`"_inst_name_`", this); +`endif + +// Common check macros used by DV_CHECK error and fatal macros. 
+// Note: Should not be called by user code +`ifndef DV_CHECK + `define DV_CHECK(T_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(T_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed (%s) %s ", `"T_`", MSG_)) \ + end +`endif + +`ifndef DV_CHECK_EQ + `define DV_CHECK_EQ(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ == EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s == %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_NE + `define DV_CHECK_NE(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ != EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s != %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_CASE_EQ + `define DV_CHECK_CASE_EQ(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ === EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s === %s (0x%0h [%0b] vs 0x%0h [%0b]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_CASE_NE + `define DV_CHECK_CASE_NE(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ !== EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s !== %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_LT + `define DV_CHECK_LT(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ < EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s < %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_GT + `define DV_CHECK_GT(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ > EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s > %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_LE + `define DV_CHECK_LE(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) 
\ + if (!(ACT_ <= EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s <= %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +`ifndef DV_CHECK_GE + `define DV_CHECK_GE(ACT_, EXP_, MSG_="", SEV_=error, ID_=`gfn) \ + if (!(ACT_ >= EXP_)) begin \ + `uvm_``SEV_(ID_, $sformatf("Check failed %s >= %s (%0d [0x%0h] vs %0d [0x%0h]) %s", \ + `"ACT_`", `"EXP_`", ACT_, ACT_, EXP_, EXP_, MSG_)) \ + end +`endif + +// Fatal version of the checks +`ifndef DV_CHECK_FATAL + `define DV_CHECK_FATAL(T_, MSG_="", ID_=`gfn) \ + `DV_CHECK(T_, MSG_, fatal, ID_) +`endif + +`ifndef DV_CHECK_EQ_FATAL + `define DV_CHECK_EQ_FATAL(ACT_, EXP_, MSG_="", ID_=`gfn) \ + `DV_CHECK_EQ(ACT_, EXP_, MSG_, fatal, ID_) +`endif + +`ifndef DV_CHECK_NE_FATAL + `define DV_CHECK_NE_FATAL(ACT_, EXP_, MSG_="", ID_=`gfn) \ + `DV_CHECK_NE(ACT_, EXP_, MSG_, fatal, ID_) +`endif + +`ifndef DV_CHECK_LT_FATAL + `define DV_CHECK_LT_FATAL(ACT_, EXP_, MSG_="", ID_=`gfn) \ + `DV_CHECK_LT(ACT_, EXP_, MSG_, fatal, ID_) +`endif + +`ifndef DV_CHECK_GT_FATAL + `define DV_CHECK_GT_FATAL(ACT_, EXP_, MSG_="", ID_=`gfn) \ + `DV_CHECK_GT(ACT_, EXP_, MSG_, fatal, ID_) +`endif + +`ifndef DV_CHECK_LE_FATAL + `define DV_CHECK_LE_FATAL(ACT_, EXP_, MSG_="", ID_=`gfn) \ + `DV_CHECK_LE(ACT_, EXP_, MSG_, fatal, ID_) +`endif + +`ifndef DV_CHECK_GE_FATAL + `define DV_CHECK_GE_FATAL(ACT_, EXP_, MSG_="", ID_=`gfn) \ + `DV_CHECK_GE(ACT_, EXP_, MSG_, fatal, ID_) +`endif + +// Shorthand for common foo.randomize() + fatal check +`ifndef DV_CHECK_RANDOMIZE_FATAL + `define DV_CHECK_RANDOMIZE_FATAL(VAR_, MSG_="Randomization failed!", ID_=`gfn) \ + `DV_CHECK_FATAL(VAR_.randomize(), MSG_, ID_) +`endif + +// Shorthand for common foo.randomize() with { } + fatal check +`ifndef DV_CHECK_RANDOMIZE_WITH_FATAL + `define DV_CHECK_RANDOMIZE_WITH_FATAL(VAR_, WITH_C_, MSG_="Randomization failed!", ID_=`gfn) \ + `DV_CHECK_FATAL(VAR_.randomize() with {WITH_C_}, MSG_, ID_) +`endif + +// Shorthand for 
common std::randomize(foo) + fatal check +`ifndef DV_CHECK_STD_RANDOMIZE_FATAL + `define DV_CHECK_STD_RANDOMIZE_FATAL(VAR_, MSG_="Randomization failed!", ID_=`gfn) \ + `DV_CHECK_FATAL(std::randomize(VAR_), MSG_, ID_) +`endif + +// Shorthand for common std::randomize(foo) with { } + fatal check +`ifndef DV_CHECK_STD_RANDOMIZE_WITH_FATAL + `define DV_CHECK_STD_RANDOMIZE_WITH_FATAL(VAR_, WITH_C_, MSG_="Randomization failed!",ID_=`gfn) \ + `DV_CHECK_FATAL(std::randomize(VAR_) with {WITH_C_}, MSG_, ID_) +`endif + +// Shorthand for common this.randomize(foo) + fatal check +`ifndef DV_CHECK_MEMBER_RANDOMIZE_FATAL + `define DV_CHECK_MEMBER_RANDOMIZE_FATAL(VAR_, MSG_="Randomization failed!", ID_=`gfn) \ + `DV_CHECK_FATAL(this.randomize(VAR_), MSG_, ID_) +`endif + +// Shorthand for common this.randomize(foo) with { } + fatal check +`ifndef DV_CHECK_MEMBER_RANDOMIZE_WITH_FATAL + `define DV_CHECK_MEMBER_RANDOMIZE_WITH_FATAL(VAR_, C_, MSG_="Randomization failed!", ID_=`gfn) \ + `DV_CHECK_FATAL(this.randomize(VAR_) with {C_}, MSG_, ID_) +`endif + +// print static/dynamic 1d array or queue +`ifndef DV_PRINT_ARR_CONTENTS +`define DV_PRINT_ARR_CONTENTS(ARR_, V_=UVM_MEDIUM, ID_=`gfn) \ + foreach (ARR_[i]) begin \ + `uvm_info(ID_, $sformatf("%s[%0d] = 0x%0d[0x%0h]", `"ARR_`", i, ARR_[i], ARR_[i]), V_) \ + end +`endif + +// print non-empty tlm fifos that were uncompared at end of test +`ifndef DV_EOT_PRINT_TLM_FIFO_CONTENTS +`define DV_EOT_PRINT_TLM_FIFO_CONTENTS(TYP_, FIFO_, SEV_=error, ID_=`gfn) \ + while (!FIFO_.is_empty()) begin \ + TYP_ item; \ + void'(FIFO_.try_get(item)); \ + `uvm_``SEV_(ID_, $sformatf("%s item uncompared:\n%s", `"FIFO_`", item.sprint())) \ + end +`endif + +// print non-empty tlm fifos that were uncompared at end of test +`ifndef DV_EOT_PRINT_TLM_FIFO_ARR_CONTENTS +`define DV_EOT_PRINT_TLM_FIFO_ARR_CONTENTS(TYP_, FIFO_, SEV_=error, ID_=`gfn) \ + foreach (FIFO_[i]) begin \ + while (!FIFO_[i].is_empty()) begin \ + TYP_ item; \ + void'(FIFO_[i].try_get(item)); \ + 
`uvm_``SEV_(ID_, $sformatf("%s[%0d] item uncompared:\n%s", `"FIFO_`", i, item.sprint())) \ + end \ + end +`endif + +// print non-empty tlm fifos that were uncompared at end of test +`ifndef DV_EOT_PRINT_Q_CONTENTS +`define DV_EOT_PRINT_Q_CONTENTS(TYP_, Q_, SEV_=error, ID_=`gfn) \ + while (Q_.size() != 0) begin \ + TYP_ item = Q_.pop_front(); \ + `uvm_``SEV_(ID_, $sformatf("%s item uncompared:\n%s", `"Q_`", item.sprint())) \ + end +`endif + +// print non-empty tlm fifos that were uncompared at end of test +`ifndef DV_EOT_PRINT_Q_ARR_CONTENTS +`define DV_EOT_PRINT_Q_ARR_CONTENTS(TYP_, Q_, SEV_=error, ID_=`gfn) \ + foreach (Q_[i]) begin \ + while (Q_[i].size() != 0) begin \ + TYP_ item = Q_[i].pop_front(); \ + `uvm_``SEV_(ID_, $sformatf("%s[%0d] item uncompared:\n%s", `"Q_`", i, item.sprint())) \ + end \ + end +`endif + +// check for non-empty mailbox and print items that were uncompared at end of test +`ifndef DV_EOT_PRINT_MAILBOX_CONTENTS +`define DV_EOT_PRINT_MAILBOX_CONTENTS(TYP_, MAILBOX_, SEV_=error, ID_=`gfn) \ + while (MAILBOX_.num() != 0) begin \ + TYP_ item; \ + void'(MAILBOX_.try_get(item)); \ + `uvm_``SEV_(ID_, $sformatf("%s item uncompared:\n%s", `"MAILBOX_`", item.sprint())) \ + end +`endif + +// get parity - implemented as a macro so that it can be invoked in constraints as well +`ifndef GET_PARITY + `define GET_PARITY(val, odd=0) (^val ^ odd) +`endif + +// wait a task or statement with timer watchdog +// input WAIT_ need to be a statement. Here are some examples +// `DV_SPINWAIT(wait(...);, "Wait for ...") +// `DV_SPINWAIT( +// while (1) begin +// ... 
+// end) +`ifndef DV_SPINWAIT +`define DV_SPINWAIT(WAIT_, MSG_ = "", TIMEOUT_NS_ = default_timeout_ns, ID_ =`gfn) \ + fork begin \ + fork \ + begin \ + WAIT_ \ + end \ + begin \ + wait_timeout(TIMEOUT_NS_, ID_, MSG_); \ + end \ + join_any \ + disable fork; \ + end join +`endif diff --git a/vendor/lowrisc_ip/dv_utils/dv_report_server.sv b/vendor/lowrisc_ip/dv_utils/dv_report_server.sv new file mode 100644 index 00000000..a29342e6 --- /dev/null +++ b/vendor/lowrisc_ip/dv_utils/dv_report_server.sv @@ -0,0 +1,87 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// Standardize look & feel of report phase and uvm logging messages. +class dv_report_server extends uvm_default_report_server; + + bit show_file_line = 1'b1; + bit use_default_uvm_report_message_format = 1'b0; + + function new (string name = ""); + super.new(name); + // provide ability to override these knobs over cli + void'($value$plusargs("show_file_line=%0b", show_file_line)); + void'($value$plusargs("use_default_uvm_report_message_format=%0b", + use_default_uvm_report_message_format)); + endfunction + + function void report_summarize(UVM_FILE file = 0); + int num_uvm_warning; + int num_uvm_error; + int num_uvm_fatal; + + num_uvm_warning = get_severity_count(UVM_WARNING); + num_uvm_error = get_severity_count(UVM_ERROR); + num_uvm_fatal = get_severity_count(UVM_FATAL); + + // Print default summary report + super.report_summarize(file); + + // Print final test pass-fail - external tool can use this signature for test status + // Treat UVM_WARNINGs as a sign of test failure since it could silently result in false pass + if ((num_uvm_warning + num_uvm_error + num_uvm_fatal) == 0) begin + $display("\nTEST PASSED CHECKS"); + $display(" _____ _ _ _ "); + $display("|_ _|__ ___| |_ _ __ __ _ ___ ___ ___ __| | |"); + $display(" | |/ _ \\/ __| __| | '_ \\ / _` / __/ __|/ _ \\/ _` | |"); + $display(" | | __/\\__ 
\\ |_ | |_) | (_| \\__ \\__ \\ __/ (_| |_|"); + $display(" |_|\\___||___/\\__| | .__/ \\__,_|___/___/\\___|\\__,_(_)"); + $display(" |_| \n"); + end + else begin + $display("\nTEST FAILED CHECKS"); + $display(" _____ _ __ _ _ _ _ "); + $display("|_ _|__ ___| |_ / _| __ _(_) | ___ __| | |"); + $display(" | |/ _ \\/ __| __| | |_ / _` | | |/ _ \\/ _` | |"); + $display(" | | __/\\__ \\ |_ | _| (_| | | | __/ (_| |_|"); + $display(" |_|\\___||___/\\__| |_| \\__,_|_|_|\\___|\\__,_(_)\n"); + end + + endfunction + + // Override default messaging format to standard "pretty" format for all testbenches + virtual function string compose_report_message(uvm_report_message report_message, + string report_object_name = ""); + + if (use_default_uvm_report_message_format) begin + return (super.compose_report_message(report_message, report_object_name)); + end else begin + uvm_severity severity = report_message.get_severity(); + string filename = report_message.get_filename(); + int line = report_message.get_line(); + string obj_name = report_message.get_report_object().get_full_name(); + string id = report_message.get_id(); + string message = report_message.get_message(); + string file_line; + + if (show_file_line && filename != "") begin + filename = get_no_hier_filename(filename); + file_line = $sformatf("(%0s:%0d) ", filename, line); + end + obj_name = {obj_name, ((obj_name != "") ? 
" " : "")}; + compose_report_message = $sformatf({"%0s @ %t: ", file_line, obj_name, "[%0s] %0s"}, + severity.name(), $realtime, id, message); + return compose_report_message; + end + endfunction + + // get we don't really want the full path to the filename + // this should be reasonably lightweight + local function string get_no_hier_filename(string filename); + int idx; + for (idx = filename.len() - 1; idx >= 0; idx--) if (filename[idx] == "/") break; + return (filename.substr(idx + 1, filename.len() - 1)); + endfunction + +endclass diff --git a/vendor/lowrisc_ip/dv_utils/dv_utils.core b/vendor/lowrisc_ip/dv_utils/dv_utils.core new file mode 100644 index 00000000..0dba0235 --- /dev/null +++ b/vendor/lowrisc_ip/dv_utils/dv_utils.core @@ -0,0 +1,23 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:dv_utils" +description: "DV utilities" + +filesets: + files_dv: + depend: + - lowrisc:dv:common_ifs + - lowrisc:prim:assert:0.1 + - lowrisc:constants:top_pkg + files: + - dv_utils_pkg.sv + - dv_macros.svh: {is_include_file: true} + - dv_report_server.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/dv_utils/dv_utils_pkg.sv b/vendor/lowrisc_ip/dv_utils/dv_utils_pkg.sv new file mode 100644 index 00000000..2ba39870 --- /dev/null +++ b/vendor/lowrisc_ip/dv_utils/dv_utils_pkg.sv @@ -0,0 +1,145 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +package dv_utils_pkg; + // dep packages + import uvm_pkg::*; + import top_pkg::*; + + // macro includes + `include "dv_macros.svh" + `include "uvm_macros.svh" + + // common parameters used across all benches + parameter int NUM_MAX_INTERRUPTS = 32; + parameter int NUM_MAX_ALERTS = 32; + + // types & variables + typedef bit [31:0] uint; + typedef bit [7:0] uint8; + typedef bit [15:0] uint16; + typedef bit [31:0] uint32; + typedef bit [63:0] uint64; + + // typedef parameterized pins_if for ease of implementation for interrupts and alerts + typedef virtual pins_if #(NUM_MAX_INTERRUPTS) intr_vif; + typedef virtual pins_if #(1) devmode_vif; + typedef virtual pins_if #(1) tlul_assert_ctrl_vif; + + // interface direction / mode - Host or Device + typedef enum bit { + Host, + Device + } if_mode_e; + + // speed for the clock + typedef enum int { + ClkFreq24Mhz = 24, + ClkFreq25Mhz = 25, + ClkFreq48Mhz = 48, + ClkFreq50Mhz = 50, + ClkFreq100Mhz = 100 + } clk_freq_mhz_e; + + // compare operator types + typedef enum { + CompareOpEq, + CompareOpCaseEq, + CompareOpNe, + CompareOpCaseNe, + CompareOpGt, + CompareOpGe, + CompareOpLt, + CompareOpLe + } compare_op_e; + + // mem address struct + typedef struct { + uvm_reg_addr_t start_addr; + uvm_reg_addr_t end_addr; + } addr_range_t; + + // Enum representing a bus operation type - read or write. 
+ typedef enum bit { + BusOpWrite = 1'b0, + BusOpRead = 1'b1 + } bus_op_e; + + // Enum representing a type of host requests - read only, write only or random read & write + typedef enum int { + HostReqNone = 0, + HostReqReadOnly = 1, + HostReqWriteOnly = 2, + HostReqReadWrite = 3 + } host_req_type_e; + + string msg_id = "dv_utils_pkg"; + + // Simple function to set max errors before quitting sim + function automatic void set_max_quit_count(int n); + uvm_report_server report_server = uvm_report_server::get_server(); + report_server.set_max_quit_count(n); + endfunction + + // return if uvm_fatal occurred + function automatic bit has_uvm_fatal_occurred(); + uvm_report_server report_server = uvm_report_server::get_server(); + return report_server.get_severity_count(UVM_FATAL) > 0; + endfunction + + // task that waits for the specfied timeout + task automatic wait_timeout(input uint timeout_ns, + input string error_msg_id = msg_id, + input string error_msg = "timeout occurred!", + input bit report_fatal = 1); + #(timeout_ns * 1ns); + if (report_fatal) `uvm_fatal(error_msg_id, error_msg) + else `uvm_error(error_msg_id, error_msg) + endtask : wait_timeout + + // get masked data based on provided byte mask; if csr reg handle is provided (optional) then + // masked bytes from csr's mirrored value are returned, else masked bytes are 0's + function automatic bit [TL_DW-1:0] get_masked_data(bit [TL_DW-1:0] data, + bit [TL_DBW-1:0] mask, + uvm_reg csr = null); + bit [TL_DW-1:0] csr_data; + csr_data = (csr != null) ? csr.get_mirrored_value() : '0; + get_masked_data = data; + foreach (mask[i]) + if (~mask[i]) get_masked_data[i * 8 +: 8] = csr_data[i * 8 +: 8]; + endfunction + + // get absolute value of the input. Usage: absolute(val) or absolute(a - b) + function automatic uint absolute(int val); + return val >= 0 ? 
val : -val; + endfunction + + // endian swap + function automatic logic [31:0] endian_swap(logic [31:0] data); + return {<<8{data}}; + endfunction + + // create a sequence by name and return the handle of uvm_sequence + function automatic uvm_sequence create_seq_by_name(string seq_name); + uvm_object obj; + uvm_factory factory; + uvm_sequence seq; + + factory = uvm_factory::get(); + obj = factory.create_object_by_name(seq_name, "", seq_name); + if (obj == null) begin + // print factory overrides to help debug + factory.print(1); + `uvm_fatal(msg_id, $sformatf("could not create %0s seq", seq_name)) + end + if (!$cast(seq, obj)) begin + `uvm_fatal(msg_id, $sformatf("cast failed - %0s is not a uvm_sequence", seq_name)) + end + return seq; + endfunction + + // sources + `include "dv_report_server.sv" + +endpackage diff --git a/vendor/lowrisc_ip/dvsim/Deploy.py b/vendor/lowrisc_ip/dvsim/Deploy.py new file mode 100644 index 00000000..285317f9 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/Deploy.py @@ -0,0 +1,873 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r""" +Classes +""" + +import logging as log +import pprint +import random +import re +import shlex +import sys +import time +from collections import OrderedDict + +import hjson +from tabulate import tabulate + +from utils import * + + +class Deploy(): + """ + Abstraction for deploying builds and runs. + """ + + # Timer in hours, minutes and seconds. + hh = 0 + mm = 0 + ss = 0 + + # Maintain a list of dispatched items. + dispatch_counter = 0 + + # Misc common deploy settings. 
+ print_legend = True + print_interval = 5 + max_parallel = 16 + max_odirs = 5 + + def __self_str__(self): + if log.getLogger().isEnabledFor(VERBOSE): + return pprint.pformat(self.__dict__) + else: + ret = self.cmd + if self.sub != []: ret += "\nSub:\n" + str(self.sub) + return ret + + def __str__(self): + return self.__self_str__() + + def __repr__(self): + return self.__self_str__() + + def __init__(self, sim_cfg): + # Cross ref the whole cfg object for ease. + self.sim_cfg = sim_cfg + + # Common vars + self.identifier = "" + self.cmd = "" + self.odir = "" + self.log = "" + self.fail_msg = "" + + # Flag to indicate whether to 'overwrite' if odir already exists, + # or to backup the existing one and create a new one. + # For builds, we want to overwrite existing to leverage the tools' + # incremental / partition compile features. For runs, we may want to + # create a new one. + self.renew_odir = False + + # List of vars required to be exported to sub-shell + self.exports = {} + + # Deploy sub commands + self.sub = [] + + # Process + self.process = None + self.log_fd = None + self.status = None + + # These are command, output directory and log file + self.mandatory_misc_attrs.update({ + "name": False, + "build_mode": False, + "flow_makefile": False, + "exports": False, + "dry_run": False + }) + + # Function to parse a dict and extract the mandatory cmd and misc attrs. 
+ def parse_dict(self, ddict): + if not hasattr(self, "target"): + log.error( + "Class %s does not have the mandatory attribute \"target\" defined", + self.__class__.__name__) + sys.exit(1) + + ddict_keys = ddict.keys() + for key in self.mandatory_cmd_attrs.keys(): + if self.mandatory_cmd_attrs[key] == False: + if key in ddict_keys: + setattr(self, key, ddict[key]) + self.mandatory_cmd_attrs[key] = True + + for key in self.mandatory_misc_attrs.keys(): + if self.mandatory_misc_attrs[key] == False: + if key in ddict_keys: + setattr(self, key, ddict[key]) + self.mandatory_misc_attrs[key] = True + + def __post_init__(self): + # Ensure all mandatory attrs are set + for attr in self.mandatory_cmd_attrs.keys(): + if self.mandatory_cmd_attrs[attr] is False: + log.error("Attribute \"%s\" not found for \"%s\".", attr, + self.name) + sys.exit(1) + + for attr in self.mandatory_misc_attrs.keys(): + if self.mandatory_misc_attrs[attr] is False: + log.error("Attribute \"%s\" not found for \"%s\".", attr, + self.name) + sys.exit(1) + + # Recursively search and replace wildcards + self.__dict__ = find_and_substitute_wildcards(self.__dict__, + self.__dict__) + + # Set identifier. 
+ self.identifier = self.sim_cfg.name + ":" + self.name + + # Set the command, output dir and log + self.odir = getattr(self, self.target + "_dir") + # Set the output dir link name to the basename of odir (by default) + self.odir_ln = os.path.basename(os.path.normpath(self.odir)) + self.log = self.odir + "/" + self.target + ".log" + + # If using LSF, redirect stdout and err to the log file + self.cmd = self.construct_cmd() + + def construct_cmd(self): + cmd = "make -f " + self.flow_makefile + " " + self.target + if self.dry_run is True: + cmd += " -n" + for attr in self.mandatory_cmd_attrs.keys(): + value = getattr(self, attr) + if type(value) is list: + pretty_value = [] + for item in value: + pretty_value.append(item.strip()) + value = " ".join(pretty_value) + if type(value) is bool: + value = int(value) + if type(value) is str: + value = value.strip() + cmd += " " + attr + "=\"" + str(value) + "\"" + + # TODO: If not running locally, redirect stdout and err to the log file + # self.cmd += " > " + self.log + " 2>&1 &" + return cmd + + def dispatch_cmd(self): + self.exports.update(os.environ) + args = shlex.split(self.cmd) + try: + # If renew_odir flag is True - then move it. + if self.renew_odir: self.odir_limiter(odir=self.odir) + os.system("mkdir -p " + self.odir) + os.system("ln -s " + self.odir + " " + self.sim_cfg.links['D'] + + '/' + self.odir_ln) + f = open(self.log, "w") + self.process = subprocess.Popen(args, + bufsize=4096, + universal_newlines=True, + stdout=f, + stderr=f, + env=self.exports) + self.log_fd = f + self.status = "D" + Deploy.dispatch_counter += 1 + except IOError: + log.error('IO Error: See %s', self.log) + if self.log_fd: self.log_fd.close() + self.status = "K" + + def odir_limiter(self, odir, max_odirs=-1): + '''Function to backup previously run output directory to maintain a + history of a limited number of output directories. It deletes the output + directory with the oldest timestamps, if the limit is reached. 
It returns + a list of directories that remain after deletion. + Arguments: + odir: The output directory to backup + max_odirs: Maximum output directories to maintain as history. + + Returns: + dirs: Space-separated list of directories that remain after deletion. + ''' + try: + # If output directory exists, back it up. + if os.path.exists(odir): + ts = run_cmd("date '+" + self.sim_cfg.ts_format + "' -d \"" + + "$(stat -c '%y' " + odir + ")\"") + os.system('mv ' + odir + " " + odir + "_" + ts) + except IOError: + log.error('Failed to back up existing output directory %s', odir) + + dirs = "" + # Delete older directories. + try: + pdir = os.path.realpath(odir + "/..") + # Fatal out if pdir got set to root. + if pdir == "/": + log.fatal( + "Something went wrong while processing \"%s\": odir = \"%s\"", + self.name, odir) + sys.exit(1) + + if os.path.exists(pdir): + find_cmd = "find " + pdir + " -mindepth 1 -maxdepth 1 -type d " + dirs = run_cmd(find_cmd) + dirs = dirs.replace('\n', ' ') + list_dirs = dirs.split() + num_dirs = len(list_dirs) + if max_odirs == -1: max_odirs = self.max_odirs + num_rm_dirs = num_dirs - max_odirs + if num_rm_dirs > -1: + rm_dirs = run_cmd(find_cmd + + "-printf '%T+ %p\n' | sort | head -n " + + str(num_rm_dirs + 1) + + " | awk '{print $2}'") + rm_dirs = rm_dirs.replace('\n', ' ') + dirs = dirs.replace(rm_dirs, "") + os.system("/bin/rm -rf " + rm_dirs) + except IOError: + log.error("Failed to delete old run directories!") + return dirs + + def set_status(self): + self.status = 'P' + if self.dry_run is False: + for fail_pattern in self.fail_patterns: + # Return error messege with the following 4 lines. 
+ grep_cmd = "grep -m 1 -A 4 -E \'" + fail_pattern + "\' " + self.log + (status, rslt) = subprocess.getstatusoutput(grep_cmd) + if rslt: + msg = "```\n{}\n```\n".format(rslt) + self.fail_msg += msg + log.log(VERBOSE, msg) + self.status = 'F' + break + + # If fail patterns were not encountered, but the job returned with non-zero exit code + # for whatever reason, then show the last 10 lines of the log as the failure message, + # which might help with the debug. + if self.process.returncode != 0 and not self.fail_msg: + msg = "Last 10 lines of the log:
\n" + self.fail_msg += msg + log.log(VERBOSE, msg) + get_fail_msg_cmd = "tail -n 10 " + self.log + msg = run_cmd(get_fail_msg_cmd) + msg = "```\n{}\n```\n".format(msg) + self.fail_msg += msg + log.log(VERBOSE, msg) + self.status = "F" + + # Return if status is fail - no need to look for pass patterns. + if self.status == 'F': return + + # If fail patterns were not found, ensure pass patterns indeed were. + for pass_pattern in self.pass_patterns: + grep_cmd = "grep -c -m 1 -E \'" + pass_pattern + "\' " + self.log + (status, rslt) = subprocess.getstatusoutput(grep_cmd) + if rslt == "0": + msg = "Pass pattern \"{}\" not found.
\n".format( + pass_pattern) + self.fail_msg += msg + log.log(VERBOSE, msg) + self.status = 'F' + break + + # Recursively set sub-item's status if parent item fails + def set_sub_status(self, status): + if self.sub == []: return + for sub_item in self.sub: + sub_item.status = status + sub_item.set_sub_status(status) + + def link_odir(self): + if self.status == '.': + log.error("Method unexpectedly called!") + else: + old_link = self.sim_cfg.links['D'] + "/" + self.odir_ln + new_link = self.sim_cfg.links[self.status] + "/" + self.odir_ln + cmd = "ln -s " + self.odir + " " + new_link + "; " + cmd += "rm " + old_link + try: + os.system(cmd) + except Exception as e: + log.error("Cmd \"%s\" could not be run", cmd) + + def get_status(self): + if self.status != "D": return + if self.process.poll() is not None: + self.log_fd.close() + self.set_status() + + log.debug("Item %s has completed execution: %s", self.name, + self.status) + Deploy.dispatch_counter -= 1 + self.link_odir() + del self.process + + @staticmethod + def increment_timer(): + # sub function that increments with overflow = 60 + def _incr_ovf_60(val): + if val >= 59: + val = 0 + return val, True + else: + val += 1 + return val, False + + incr_hh = False + Deploy.ss, incr_mm = _incr_ovf_60(Deploy.ss) + if incr_mm: Deploy.mm, incr_hh = _incr_ovf_60(Deploy.mm) + if incr_hh: Deploy.hh += 1 + + @staticmethod + def deploy(items): + dispatched_items = [] + queued_items = [] + + # Print timer val in hh:mm:ss. + def get_timer_val(): + return "%02i:%02i:%02i" % (Deploy.hh, Deploy.mm, Deploy.ss) + + # Check if elapsed time has reached the next print interval. + def has_print_interval_reached(): + # Deploy.print_interval is expected to be < 1 hour. 
+ return (((Deploy.mm * 60 + Deploy.ss) % + Deploy.print_interval) == 0) + + def dispatch_items(items): + item_names = OrderedDict() + for item in items: + if item.target not in item_names.keys(): + item_names[item.target] = "" + if item.status is None: + item_names[item.target] += item.identifier + ", " + item.dispatch_cmd() + + for target in item_names.keys(): + if item_names[target] != "": + item_names[target] = " [" + item_names[target][:-2] + "]" + log.log(VERBOSE, "[%s]: [%s]: [dispatch]:\n%s", + get_timer_val(), target, item_names[target]) + + # Initialize status for a target, add '_stats_' for the said target + # and initialize counters for queued, dispatched, passed, failed, + # killed and total to 0. Also adds a boolean key to indicate if all + # items in a given target are done. + def init_status_target_stats(status, target): + status[target] = OrderedDict() + status[target]['_stats_'] = OrderedDict() + status[target]['_stats_']['Q'] = 0 + status[target]['_stats_']['D'] = 0 + status[target]['_stats_']['P'] = 0 + status[target]['_stats_']['F'] = 0 + status[target]['_stats_']['K'] = 0 + status[target]['_stats_']['T'] = 0 + status[target]['_done_'] = False + + # Update status counter for a newly queued item. + def add_status_target_queued(status, item): + if item.target not in status.keys(): + init_status_target_stats(status, item.target) + status[item.target][item] = "Q" + status[item.target]['_stats_']['Q'] += 1 + status[item.target]['_stats_']['T'] += 1 + + # Update status counters for a target. 
+ def update_status_target_stats(status, item): + old_status = status[item.target][item] + status[item.target]['_stats_'][old_status] -= 1 + status[item.target]['_stats_'][item.status] += 1 + status[item.target][item] = item.status + + def check_if_done_and_print_status(status, print_status_flag): + all_done = True + for target in status.keys(): + target_done_prev = status[target]['_done_'] + target_done_curr = ((status[target]['_stats_']["Q"] == 0) and + (status[target]['_stats_']["D"] == 0)) + status[target]['_done_'] = target_done_curr + all_done &= target_done_curr + + # Print if flag is set and target_done is not True for two + # consecutive times. + if not (target_done_prev and + target_done_curr) and print_status_flag: + stats = status[target]['_stats_'] + width = "0{}d".format(len(str(stats["T"]))) + msg = "[" + for s in stats.keys(): + msg += s + ": {:{}}, ".format(stats[s], width) + msg = msg[:-2] + "]" + log.info("[%s]: [%s]: %s", get_timer_val(), target, msg) + return all_done + + # Print legend once at the start of the run. + if Deploy.print_legend: + log.info("[legend]: [Q: queued, D: dispatched, " + "P: passed, F: failed, K: killed, T: total]") + Deploy.print_legend = False + + status = OrderedDict() + print_status_flag = True + + # Queue all items + queued_items = items + for item in queued_items: + add_status_target_queued(status, item) + + all_done = False + while not all_done: + # Get status of dispatched items. + for item in dispatched_items: + if item.status == "D": item.get_status() + if item.status != status[item.target][item]: + print_status_flag = True + if item.status != "D": + if item.status != "P": + # Kill its sub items if item did not pass. 
+ item.set_sub_status("K") + log.error("[%s]: [%s]: [status] [%s: %s]", + get_timer_val(), item.target, + item.identifier, item.status) + else: + log.log(VERBOSE, "[%s]: [%s]: [status] [%s: %s]", + get_timer_val(), item.target, + item.identifier, item.status) + # Queue items' sub-items if it is done. + queued_items.extend(item.sub) + for sub_item in item.sub: + add_status_target_queued(status, sub_item) + update_status_target_stats(status, item) + + # Dispatch items from the queue as slots free up. + all_done = (len(queued_items) == 0) + if not all_done: + num_slots = Deploy.max_parallel - Deploy.dispatch_counter + if num_slots > 0: + if len(queued_items) > num_slots: + dispatch_items(queued_items[0:num_slots]) + dispatched_items.extend(queued_items[0:num_slots]) + queued_items = queued_items[num_slots:] + else: + dispatch_items(queued_items) + dispatched_items.extend(queued_items) + queued_items = [] + + # Check if we are done and print the status periodically. + all_done &= check_if_done_and_print_status(status, + print_status_flag) + + # Advance time by 1s if there is more work to do. + if not all_done: + time.sleep(1) + Deploy.increment_timer() + print_status_flag = has_print_interval_reached() + + +class CompileSim(Deploy): + """ + Abstraction for building the simulation executable. 
+ """ + + # Register all builds with the class + items = [] + + def __init__(self, build_mode, sim_cfg): + self.target = "build" + self.pass_patterns = [] + self.fail_patterns = [] + + self.mandatory_cmd_attrs = { + # tool srcs + "tool_srcs": False, + "tool_srcs_dir": False, + + # RAL gen + "skip_ral": False, + "gen_ral_pkg_cmd": False, + "gen_ral_pkg_dir": False, + "gen_ral_pkg_opts": False, + + # Flist gen + "sv_flist_gen_cmd": False, + "sv_flist_gen_dir": False, + "sv_flist_gen_opts": False, + + # Build + "build_dir": False, + "build_cmd": False, + "build_opts": False + } + + self.mandatory_misc_attrs = { + "cov_db_dir": False, + "build_pass_patterns": False, + "build_fail_patterns": False + } + + # Initialize + super().__init__(sim_cfg) + super().parse_dict(build_mode.__dict__) + # Call this method again with the sim_cfg dict passed as the object, + # since it may contain additional mandatory attrs. + super().parse_dict(sim_cfg.__dict__) + self.build_mode = self.name + self.pass_patterns = self.build_pass_patterns + self.fail_patterns = self.build_fail_patterns + self.__post_init__() + + # Start fail message construction + self.fail_msg = "\n**BUILD:** {}
\n".format(self.name) + log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '') + self.fail_msg += "**LOG:** $scratch_path/{}
\n".format(log_sub_path) + + CompileSim.items.append(self) + + def dispatch_cmd(self): + # Delete previous cov_db_dir if it exists before dispatching new build. + if os.path.exists(self.cov_db_dir): + os.system("rm -rf " + self.cov_db_dir) + super().dispatch_cmd() + + +class CompileOneShot(Deploy): + """ + Abstraction for building the simulation executable. + """ + + # Register all builds with the class + items = [] + + def __init__(self, build_mode, sim_cfg): + self.target = "build" + self.pass_patterns = [] + self.fail_patterns = [] + + self.mandatory_cmd_attrs = { + # tool srcs + "tool_srcs": False, + "tool_srcs_dir": False, + + # Build + "build_dir": False, + "build_cmd": False, + "build_opts": False, + + # Report processing + "report_cmd": False, + "report_opts": False + } + + self.mandatory_misc_attrs = {} + + # Initialize + super().__init__(sim_cfg) + super().parse_dict(build_mode.__dict__) + # Call this method again with the sim_cfg dict passed as the object, + # since it may contain additional mandatory attrs. + super().parse_dict(sim_cfg.__dict__) + self.build_mode = self.name + self.__post_init__() + + # Start fail message construction + self.fail_msg = "\n**BUILD:** {}
\n".format(self.name) + log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '') + self.fail_msg += "**LOG:** $scratch_path/{}
\n".format(log_sub_path) + + CompileOneShot.items.append(self) + + +class RunTest(Deploy): + """ + Abstraction for running tests. This is one per seed for each test. + """ + + # Initial seed values when running tests (if available). + seeds = [] + fixed_seed = None + + # Register all runs with the class + items = [] + + def __init__(self, index, test, sim_cfg): + self.target = "run" + self.pass_patterns = [] + self.fail_patterns = [] + + self.mandatory_cmd_attrs = { + "proj_root": False, + "uvm_test": False, + "uvm_test_seq": False, + "run_opts": False, + "sw_dir": False, + "sw_name": False, + "sw_build_device": False, + "sw_build_dir": False, + "run_dir": False, + "run_cmd": False, + "run_opts": False + } + + self.mandatory_misc_attrs = { + "run_dir_name": False, + "cov_db_test_dir": False, + "run_pass_patterns": False, + "run_fail_patterns": False + } + + self.index = index + self.seed = RunTest.get_seed() + + # Initialize + super().__init__(sim_cfg) + super().parse_dict(test.__dict__) + # Call this method again with the sim_cfg dict passed as the object, + # since it may contain additional mandatory attrs. + super().parse_dict(sim_cfg.__dict__) + self.test = self.name + self.renew_odir = True + self.build_mode = test.build_mode.name + self.pass_patterns = self.run_pass_patterns + self.fail_patterns = self.run_fail_patterns + self.__post_init__() + # For output dir link, use run_dir_name instead. + self.odir_ln = self.run_dir_name + + # Start fail message construction + self.fail_msg = "\n**TEST:** {}, ".format(self.name) + self.fail_msg += "**SEED:** {}
\n".format(self.seed) + log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '') + self.fail_msg += "**LOG:** $scratch_path/{}
\n".format(log_sub_path) + + RunTest.items.append(self) + + def __post_init__(self): + super().__post_init__() + # Set identifier. + self.identifier = self.sim_cfg.name + ":" + self.run_dir_name + + def get_status(self): + '''Override base class get_status implementation for additional post-status + actions.''' + super().get_status() + if self.status not in ["D", "P"]: + # Delete the coverage data if available. + if os.path.exists(self.cov_db_test_dir): + log.log(VERBOSE, "Deleting coverage data of failing test:\n%s", + self.cov_db_test_dir) + os.system("/bin/rm -rf " + self.cov_db_test_dir) + + @staticmethod + def get_seed(): + # If --seeds option is passed, then those custom seeds are consumed + # first. If --fixed-seed is also passed, the subsequent tests + # (once the custom seeds are consumed) will be run with the fixed seed. + if not RunTest.seeds: + if RunTest.fixed_seed: return RunTest.fixed_seed + for i in range(1000): + seed = random.getrandbits(32) + RunTest.seeds.append(seed) + return RunTest.seeds.pop(0) + + +class CovMerge(Deploy): + """ + Abstraction for merging coverage databases. An item of this class is created AFTER + the regression is completed. + """ + + # Register all builds with the class + items = [] + + def __init__(self, sim_cfg): + self.target = "cov_merge" + self.pass_patterns = [] + self.fail_patterns = [] + + # Construct local 'special' variable from cov directories that need to + # be merged. + self.cov_db_dirs = "" + + self.mandatory_cmd_attrs = { + "cov_merge_cmd": False, + "cov_merge_opts": False + } + + self.mandatory_misc_attrs = { + "cov_merge_dir": False, + "cov_merge_db_dir": False + } + + # Initialize + super().__init__(sim_cfg) + super().parse_dict(sim_cfg.__dict__) + self.__post_init__() + + # Override standard output and log patterns. + self.odir = self.cov_merge_db_dir + self.odir_ln = os.path.basename(os.path.normpath(self.odir)) + + # Start fail message construction + self.fail_msg = "\n**COV_MERGE:** {}
\n".format(self.name) + log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '') + self.fail_msg += "**LOG:** $scratch_path/{}
\n".format(log_sub_path) + + CovMerge.items.append(self) + + def __post_init__(self): + # Add cov db dirs from all the builds that were kicked off. + for bld in self.sim_cfg.builds: + self.cov_db_dirs += bld.cov_db_dir + " " + + # Recursively search and replace wildcards, ignoring cov_db_dirs for now. + # We need to resolve it later based on cov_db_dirs value set below. + self.__dict__ = find_and_substitute_wildcards( + self.__dict__, self.__dict__, ignored_wildcards=["cov_db_dirs"]) + + # Prune previous merged cov directories. + prev_cov_db_dirs = self.odir_limiter(odir=self.cov_merge_db_dir) + + # If a merged cov data base exists from a previous run, then consider + # that as well for merging, if the --cov-merge-previous command line + # switch is passed. + if self.sim_cfg.cov_merge_previous: + self.cov_db_dirs += prev_cov_db_dirs + + # Call base class __post_init__ to do checks and substitutions + super().__post_init__() + + +class CovReport(Deploy): + """ + Abstraction for coverage report generation. An item of this class is created AFTER + the regression is completed. + """ + + # Register all builds with the class + items = [] + + def __init__(self, sim_cfg): + self.target = "cov_report" + self.pass_patterns = [] + self.fail_patterns = [] + self.cov_total = "" + self.cov_results = "" + + self.mandatory_cmd_attrs = { + "cov_report_cmd": False, + "cov_report_opts": False + } + + self.mandatory_misc_attrs = { + "cov_report_dir": False, + "cov_merge_db_dir": False, + "cov_report_dashboard": False + } + + # Initialize + super().__init__(sim_cfg) + super().parse_dict(sim_cfg.__dict__) + self.__post_init__() + + # Start fail message construction + self.fail_msg = "\n**COV_REPORT:** {}
\n".format(self.name) + log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '') + self.fail_msg += "**LOG:** $scratch_path/{}
\n".format(log_sub_path) + + CovReport.items.append(self) + + def get_status(self): + super().get_status() + # Once passed, extract the cov results summary from the dashboard. + if self.status == "P": + try: + with open(self.cov_report_dashboard, 'r') as f: + for line in f: + match = re.match("total coverage summary", line, + re.IGNORECASE) + if match: + results = [] + # Metrics on the next line. + line = f.readline().strip() + results.append(line.split()) + # Values on the next. + line = f.readline().strip() + # Pretty up the values - add % sign for ease of post + # processing. + values = [] + for val in line.split(): + val += " %" + values.append(val) + # first row is coverage total + self.cov_total = values[0] + results.append(values) + colalign = (("center", ) * len(values)) + self.cov_results = tabulate(results, + headers="firstrow", + tablefmt="pipe", + colalign=colalign) + break + + except Exception as e: + ex_msg = "Failed to parse \"{}\":\n{}".format( + self.cov_report_dashboard, str(e)) + self.fail_msg += ex_msg + log.error(ex_msg) + self.status = "F" + + if self.cov_results == "": + nf_msg = "Coverage summary not found in the reports dashboard!" + self.fail_msg += nf_msg + log.error(nf_msg) + self.status = "F" + + if self.status == "P": + # Delete the cov report - not needed. + os.system("rm -rf " + self.log) + + +class CovAnalyze(Deploy): + """ + Abstraction for coverage analysis tool. + """ + + # Register all builds with the class + items = [] + + def __init__(self, sim_cfg): + self.target = "cov_analyze" + self.pass_patterns = [] + self.fail_patterns = [] + + self.mandatory_cmd_attrs = { + "cov_analyze_cmd": False, + "cov_analyze_opts": False + } + + self.mandatory_misc_attrs = { + "cov_analyze_dir": False, + "cov_merge_db_dir": False + } + + # Initialize + super().__init__(sim_cfg) + super().parse_dict(sim_cfg.__dict__) + self.__post_init__() + + # Start fail message construction + self.fail_msg = "\n**COV_ANALYZE:** {}
\n".format(self.name) + log_sub_path = self.log.replace(self.sim_cfg.scratch_path + '/', '') + self.fail_msg += "**LOG:** $scratch_path/{}
\n".format(log_sub_path) + + CovAnalyze.items.append(self) diff --git a/vendor/lowrisc_ip/dvsim/FlowCfg.py b/vendor/lowrisc_ip/dvsim/FlowCfg.py new file mode 100644 index 00000000..5fc91d23 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/FlowCfg.py @@ -0,0 +1,628 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r""" +Class describing a flow configuration object +""" + +import datetime +import logging as log +import pprint +from shutil import which + +import hjson + +from Deploy import * +from utils import * + + +# Interface class for extensions. +class FlowCfg(): + def __str__(self): + return pprint.pformat(self.__dict__) + + def __repr__(self): + return pprint.pformat(self.__dict__) + + def __init__(self, flow_cfg_file, proj_root, args): + # Options set from command line + self.items = [] + self.items.extend(args.items) + self.list_items = [] + self.list_items.extend(args.list) + self.flow_cfg_file = flow_cfg_file + self.proj_root = proj_root + self.args = args + self.scratch_root = args.scratch_root + self.branch = args.branch + self.job_prefix = args.job_prefix + + # Options set from hjson cfg. + self.project = "" + self.scratch_path = "" + + # Imported cfg files using 'import_cfgs' keyword + self.imported_cfg_files = [] + self.imported_cfg_files.append(flow_cfg_file) + + # Add exports using 'exports' keyword - these are exported to the child + # process' environment. + self.exports = [] + + # Add overrides using the overrides keyword - existing attributes + # are overridden with the override values. + self.overrides = [] + + # List of cfgs if the parsed cfg is a master cfg list + self.cfgs = [] + + # Add a notion of "master" cfg - this is indicated using + # a special key 'use_cfgs' within the hjson cfg. 
+ self.is_master_cfg = False + + # Timestamp + self.ts_format_long = args.ts_format_long + self.timestamp_long = args.timestamp_long + self.ts_format = args.ts_format + self.timestamp = args.timestamp + + # Results + self.errors_seen = False + self.rel_path = "" + self.results_title = "" + self.results_server_prefix = "" + self.results_server_url_prefix = "" + self.results_server_cmd = "" + self.results_server_css_path = "" + self.results_server_path = "" + self.results_server_dir = "" + self.results_server_html = "" + self.results_server_page = "" + self.results_summary_server_html = "" + self.results_summary_server_page = "" + + # Full and summary results in md text. + self.results_md = "" + self.results_summary_md = "" + + def __post_init__(self): + # Run some post init checks + if not self.is_master_cfg: + # Check if self.cfgs is a list of exactly 1 item (self) + if not (len(self.cfgs) == 1 and self.cfgs[0].name == self.name): + log.error("Parse error!\n%s", self.cfgs) + sys.exit(1) + + @staticmethod + def create_instance(flow_cfg_file, proj_root, args): + '''Create a new instance of this class as with given parameters. + ''' + return FlowCfg(flow_cfg_file, proj_root, args) + + def parse_flow_cfg(self, flow_cfg_file, is_entry_point=True): + ''' + Parse the flow cfg hjson file. This is a private API used within the + extended class' __init__ function. This parses the hjson cfg (and + imports / use cfgs) and builds an initial dictionary. + + This method takes 2 args. + flow_cfg_file: This is the flow cfg file to be parsed. + is_entry_point: the cfg file that is passed on the command line is + the entry point cfg. If the cfg file is a part of an inport_cfgs + or use_cfgs key, then it is not an entry point. 
+ ''' + hjson_dict = parse_hjson(flow_cfg_file) + + # Check if this is the master cfg, if this is the entry point cfg file + if is_entry_point: + self.is_master_cfg = self.check_if_master_cfg(hjson_dict) + + # If not a master cfg, then register self with self.cfgs + if self.is_master_cfg is False: + self.cfgs.append(self) + + # Resolve the raw hjson dict to build this object + self.resolve_hjson_raw(hjson_dict) + + def _post_parse_flow_cfg(self): + '''Hook to set some defaults not found in the flow cfg hjson files. + This function has to be called manually after calling the parse_flow_cfg(). + ''' + if self.rel_path == "": + self.rel_path = os.path.dirname(self.flow_cfg_file).replace( + self.proj_root + '/', '') + + def check_if_master_cfg(self, hjson_dict): + # This is a master cfg only if it has a single key called "use_cfgs" + # which contains a list of actual flow cfgs. + hjson_cfg_dict_keys = hjson_dict.keys() + return ("use_cfgs" in hjson_cfg_dict_keys and \ + type(hjson_dict["use_cfgs"]) is list) + + def resolve_hjson_raw(self, hjson_dict): + attrs = self.__dict__.keys() + rm_hjson_dict_keys = [] + import_cfgs = [] + use_cfgs = [] + for key in hjson_dict.keys(): + if key in attrs: + hjson_dict_val = hjson_dict[key] + self_val = getattr(self, key) + scalar_types = {str: [""], int: [0, -1], bool: [False]} + + # Case 1: key value in class and hjson_dict differ - error! + if type(hjson_dict_val) != type(self_val): + log.error("Conflicting key types: \"%s\" {\"%s, \"%s\"}", + key, + type(hjson_dict_val).__name__, + type(self_val).__name__) + sys.exit(1) + + # Case 2: key value in class and hjson_dict are strs - set if + # not already set, else error! 
+ elif type(hjson_dict_val) in scalar_types.keys(): + defaults = scalar_types[type(hjson_dict_val)] + if self_val == hjson_dict_val: + rm_hjson_dict_keys.append(key) + elif self_val in defaults and not hjson_dict_val in defaults: + setattr(self, key, hjson_dict_val) + rm_hjson_dict_keys.append(key) + elif not self_val in defaults and not hjson_dict_val in defaults: + # check if key exists in command line args, use that, or + # throw conflicting error + # TODO, may throw the conflicting error but choose one and proceed rather + # than exit + override_with_args_val = False + if hasattr(self.args, key): + args_val = getattr(self.args, key) + if type(args_val) == str and args_val != "": + setattr(self, key, args_val) + override_with_args_val = True + if not override_with_args_val: + log.error( + "Conflicting values {\"%s\", \"%s\"} encountered for key \"%s\"", + str(self_val), str(hjson_dict_val), key) + sys.exit(1) + + # Case 3: key value in class and hjson_dict are lists - merge'em + elif type(hjson_dict_val) is list and type(self_val) is list: + self_val.extend(hjson_dict_val) + setattr(self, key, self_val) + rm_hjson_dict_keys.append(key) + + # Case 4: unknown issue + else: + log.error( + "Type of \"%s\" (%s) in %s appears to be invalid (should be %s)", + key, + type(hjson_dict_val).__name__, hjson_dict, + type(self_val).__name__) + sys.exit(1) + + # If key is 'import_cfgs' then add to the list of cfgs to + # process + elif key == 'import_cfgs': + import_cfgs.extend(hjson_dict[key]) + rm_hjson_dict_keys.append(key) + + # If this is a master cfg list and the key is 'use_cfgs' + elif self.is_master_cfg and key == "use_cfgs": + use_cfgs.extend(hjson_dict[key]) + + # If this is a not master cfg list and the key is 'use_cfgs' + elif not self.is_master_cfg and key == "use_cfgs": + # Throw an error and exit + log.error( + "Key \"use_cfgs\" encountered in a non-master cfg file list \"%s\"", + self.flow_cfg_file) + sys.exit(1) + + else: + # add key-value to class + 
setattr(self, key, hjson_dict[key]) + rm_hjson_dict_keys.append(key) + + # Parse imported cfgs + for cfg_file in import_cfgs: + if not cfg_file in self.imported_cfg_files: + self.imported_cfg_files.append(cfg_file) + # Substitute wildcards in cfg_file files since we need to process + # them right away. + cfg_file = subst_wildcards(cfg_file, self.__dict__) + self.parse_flow_cfg(cfg_file, False) + else: + log.error("Cfg file \"%s\" has already been parsed", cfg_file) + + # Parse master cfg files + if self.is_master_cfg: + for entry in use_cfgs: + if type(entry) is str: + # Treat this as a file entry + # Substitute wildcards in cfg_file files since we need to process + # them right away. + cfg_file = subst_wildcards(entry, + self.__dict__, + ignore_error=True) + self.cfgs.append( + self.create_instance(cfg_file, self.proj_root, + self.args)) + + elif type(entry) is dict: + # Treat this as a cfg expanded in-line + temp_cfg_file = self._conv_inline_cfg_to_hjson(entry) + if not temp_cfg_file: continue + self.cfgs.append( + self.create_instance(temp_cfg_file, self.proj_root, + self.args)) + + # Delete the temp_cfg_file once the instance is created + try: + log.log(VERBOSE, "Deleting temp cfg file:\n%s", + temp_cfg_file) + os.system("/bin/rm -rf " + temp_cfg_file) + except IOError: + log.error("Failed to remove temp cfg file:\n%s", + temp_cfg_file) + + else: + log.error( + "Type of entry \"%s\" in the \"use_cfgs\" key is invalid: %s", + entry, str(type(entry))) + sys.exit(1) + + def _conv_inline_cfg_to_hjson(self, idict): + '''Dump a temp hjson file in the scratch space from input dict. 
+ This method is to be called only by a master cfg''' + + if not self.is_master_cfg: + log.fatal("This method can only be called by a master cfg") + sys.exit(1) + + name = idict["name"] if "name" in idict.keys() else None + if not name: + log.error( + "In-line entry in use_cfgs list does not contain " + \ + "a \"name\" key (will be skipped!):\n%s", idict) + return None + + # Check if temp cfg file already exists + temp_cfg_file = self.scratch_root + "/." + self.branch + "__" + \ + name + "_cfg.hjson" + + # Create the file and dump the dict as hjson + log.log(VERBOSE, "Dumping inline cfg \"%s\" in hjson to:\n%s", name, + temp_cfg_file) + try: + with open(temp_cfg_file, "w") as f: + f.write(hjson.dumps(idict, for_json=True)) + except Exception as e: + log.error( + "Failed to hjson-dump temp cfg file\"%s\" for \"%s\"" + \ + "(will be skipped!) due to:\n%s", temp_cfg_file, name, e) + return None + + # Return the temp cfg file created + return temp_cfg_file + + def _process_overrides(self): + # Look through the dict and find available overrides. + # If override is available, check if the type of the value for existing + # and the overridden keys are the same. + overrides_dict = {} + if hasattr(self, "overrides"): + overrides = getattr(self, "overrides") + if type(overrides) is not list: + log.error( + "The type of key \"overrides\" is %s - it should be a list", + type(overrides)) + sys.exit(1) + + # Process override one by one + for item in overrides: + if type(item) is dict and set(item.keys()) == set( + ["name", "value"]): + ov_name = item["name"] + ov_value = item["value"] + if ov_name not in overrides_dict.keys(): + overrides_dict[ov_name] = ov_value + self._do_override(ov_name, ov_value) + else: + log.error( + "Override for key \"%s\" already exists!\nOld: %s\nNew: %s", + ov_name, overrides_dict[ov_name], ov_value) + sys.exit(1) + else: + log.error("\"overrides\" is a list of dicts with {\"name\": , " + \ + "\"value\": } pairs. 
Found this instead:\n%s", + str(item)) + sys.exit(1) + + def _do_override(self, ov_name, ov_value): + # Go through self attributes and replace with overrides + if hasattr(self, ov_name): + orig_value = getattr(self, ov_name) + if type(orig_value) == type(ov_value): + log.debug("Overriding \"%s\" value \"%s\" with \"%s\"", + ov_name, orig_value, ov_value) + setattr(self, ov_name, ov_value) + else: + log.error("The type of override value \"%s\" for \"%s\" mismatches " + \ + "the type of original value \"%s\"", + ov_value, ov_name, orig_value) + sys.exit(1) + else: + log.error("Override key \"%s\" not found in the cfg!", ov_name) + sys.exit(1) + + def _process_exports(self): + # Convert 'exports' to dict + exports_dict = {} + if self.exports != []: + for item in self.exports: + if type(item) is dict: + exports_dict.update(item) + elif type(item) is str: + [key, value] = item.split(':', 1) + if type(key) is not str: key = str(key) + if type(value) is not str: value = str(value) + exports_dict.update({key.strip(): value.strip()}) + else: + log.error("Type error in \"exports\": %s", str(item)) + sys.exit(1) + self.exports = exports_dict + + def _purge(self): + '''Purge the existing scratch areas in preperation for the new run.''' + return + + def purge(self): + '''Public facing API for _purge(). + ''' + for item in self.cfgs: + item._purge() + + def _print_list(self): + '''Print the list of available items that can be kicked off. + ''' + return + + def print_list(self): + '''Public facing API for _print_list(). + ''' + for item in self.cfgs: + item._print_list() + + def _create_deploy_objects(self): + '''Create deploy objects from items that were passed on for being run. + The deploy objects for build and run are created from the objects that were + created from the create_objects() method. + ''' + return + + def create_deploy_objects(self): + '''Public facing API for _create_deploy_objects(). 
+ ''' + if self.is_master_cfg: + self.deploy = [] + for item in self.cfgs: + item._create_deploy_objects() + self.deploy.extend(item.deploy) + else: + self._create_deploy_objects() + + def deploy_objects(self): + '''Public facing API for deploying all available objects.''' + Deploy.deploy(self.deploy) + + def _gen_results(self, fmt="md"): + ''' + The function is called after the regression has completed. It collates the + status of all run targets and generates a dict. It parses the testplan and + maps the generated result to the testplan entries to generate a final table + (list). It also prints the full list of failures for debug / triage. The + final result is in markdown format. + ''' + return + + def gen_results(self): + '''Public facing API for _gen_results(). + ''' + results = [] + for item in self.cfgs: + result = item._gen_results() + log.info("[results]: [%s]:\n%s\n\n", item.name, result) + results.append(result) + self.errors_seen |= item.errors_seen + + if self.is_master_cfg: self.gen_results_summary() + + def gen_results_summary(self): + '''Public facing API to generate summary results for each IP/cfg file + ''' + return + + def _get_results_page_link(self, link_text): + if not self.args.publish: return link_text + results_page_url = self.results_server_page.replace( + self.results_server_prefix, self.results_server_url_prefix) + return "[%s](%s)" % (link_text, results_page_url) + + def _publish_results(self): + '''Publish results to the opentitan web server. + Results are uploaded to {results_server_path}/latest/results. + If the 'latest' directory exists, then it is renamed to its 'timestamp' directory. + If the list of directories in this area is > 14, then the oldest entry is removed. + Links to the last 7 regression results are appended at the end if the results page. + ''' + if which('gsutil') is None or which('gcloud') is None: + log.error( + "Google cloud SDK not installed! 
Cannot access the results server" + ) + return + + # Construct the paths + results_page_url = self.results_server_page.replace( + self.results_server_prefix, self.results_server_url_prefix) + + # Timeformat for moving the dir + tf = "%Y.%m.%d_%H.%M.%S" + + # Extract the timestamp of the existing self.results_server_page + cmd = self.results_server_cmd + " ls -L " + self.results_server_page + \ + " | grep \'Creation time:\'" + + log.log(VERBOSE, cmd) + cmd_output = subprocess.run(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + log.log(VERBOSE, cmd_output.stdout.decode("utf-8")) + old_results_ts = cmd_output.stdout.decode("utf-8") + old_results_ts = old_results_ts.replace("Creation time:", "") + old_results_ts = old_results_ts.strip() + + # Move the 'latest' to its timestamp directory if lookup succeeded + if cmd_output.returncode == 0: + try: + if old_results_ts != "": + ts = datetime.datetime.strptime( + old_results_ts, "%a, %d %b %Y %H:%M:%S %Z") + old_results_ts = ts.strftime(tf) + except ValueError as e: + log.error( + "%s: \'%s\' Timestamp conversion value error raised!", e) + old_results_ts = "" + + # If the timestamp conversion failed - then create a dummy one with + # yesterday's date. 
+ if old_results_ts == "": + log.log(VERBOSE, + "Creating dummy timestamp with yesterday's date") + ts = datetime.datetime.now( + datetime.timezone.utc) - datetime.timedelta(days=1) + old_results_ts = ts.strftime(tf) + + old_results_dir = self.results_server_path + "/" + old_results_ts + cmd = self.results_server_cmd + " mv " + self.results_server_dir + \ + " " + old_results_dir + log.log(VERBOSE, cmd) + cmd_output = subprocess.run(cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + log.log(VERBOSE, cmd_output.stdout.decode("utf-8")) + if cmd_output.returncode != 0: + log.error("Failed to mv old results page \"%s\" to \"%s\"!", + self.results_server_dir, old_results_dir) + + # Do an ls in the results root dir to check what directories exist. + results_dirs = [] + cmd = self.results_server_cmd + " ls " + self.results_server_path + log.log(VERBOSE, cmd) + cmd_output = subprocess.run(args=cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL) + log.log(VERBOSE, cmd_output.stdout.decode("utf-8")) + if cmd_output.returncode == 0: + # Some directories exist. 
Check if 'latest' is one of them + results_dirs = cmd_output.stdout.decode("utf-8").strip() + results_dirs = results_dirs.split("\n") + else: + log.log(VERBOSE, "Failed to run \"%s\"!", cmd) + + # Start pruning + log.log(VERBOSE, "Pruning %s area to limit last 7 results", + self.results_server_path) + + rdirs = [] + for rdir in results_dirs: + dirname = rdir.replace(self.results_server_path, '') + dirname = dirname.replace('/', '') + if dirname == "latest": continue + rdirs.append(dirname) + rdirs.sort(reverse=True) + + rm_cmd = "" + history_txt = "\n## Past Results\n" + history_txt += "- [Latest](" + results_page_url + ")\n" + if len(rdirs) > 0: + for i in range(len(rdirs)): + if i < 7: + rdir_url = self.results_server_path + '/' + rdirs[ + i] + "/" + self.results_server_html + rdir_url = rdir_url.replace(self.results_server_prefix, + self.results_server_url_prefix) + history_txt += "- [{}]({})\n".format(rdirs[i], rdir_url) + elif i > 14: + rm_cmd += self.results_server_path + '/' + rdirs[i] + " " + + if rm_cmd != "": + rm_cmd = self.results_server_cmd + " -m rm -r " + rm_cmd + "; " + + # Append the history to the results. + results_md = self.results_md + history_txt + + # Publish the results page. + # First, write the results html file temporarily to the scratch area. 
+ results_html_file = self.scratch_path + "/results_" + self.timestamp + ".html" + f = open(results_html_file, 'w') + f.write( + md_results_to_html(self.results_title, + self.results_server_css_path, results_md)) + f.close() + rm_cmd += "/bin/rm -rf " + results_html_file + "; " + + log.info("Publishing results to %s", results_page_url) + cmd = self.results_server_cmd + " cp " + results_html_file + " " + \ + self.results_server_page + "; " + rm_cmd + log.log(VERBOSE, cmd) + try: + cmd_output = subprocess.run(args=cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + log.log(VERBOSE, cmd_output.stdout.decode("utf-8")) + except Exception as e: + log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd)) + + def publish_results(self): + '''Public facing API for publishing results to the opentitan web server. + ''' + for item in self.cfgs: + item._publish_results() + + if self.is_master_cfg: self.publish_results_summary() + + def publish_results_summary(self): + '''Public facing API for publishing md format results to the opentitan web server. + ''' + results_html_file = "summary_" + self.timestamp + ".html" + results_page_url = self.results_summary_server_page.replace( + self.results_server_prefix, self.results_server_url_prefix) + + # Publish the results page. + # First, write the results html file temporarily to the scratch area. 
+ f = open(results_html_file, 'w') + f.write( + md_results_to_html(self.results_title, + self.results_server_css_path, + self.results_summary_md)) + f.close() + rm_cmd = "/bin/rm -rf " + results_html_file + "; " + + log.info("Publishing results summary to %s", results_page_url) + cmd = self.results_server_cmd + " cp " + results_html_file + " " + \ + self.results_summary_server_page + "; " + rm_cmd + log.log(VERBOSE, cmd) + try: + cmd_output = subprocess.run(args=cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + log.log(VERBOSE, cmd_output.stdout.decode("utf-8")) + except Exception as e: + log.error("%s: Failed to publish results:\n\"%s\"", e, str(cmd)) + + def has_errors(self): + return self.errors_seen diff --git a/vendor/lowrisc_ip/dvsim/LintCfg.py b/vendor/lowrisc_ip/dvsim/LintCfg.py new file mode 100644 index 00000000..da1f1cbf --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/LintCfg.py @@ -0,0 +1,218 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r""" +Class describing lint configuration object +""" + +import logging as log +import sys +from pathlib import Path + +from tabulate import tabulate + +from Deploy import * +from Modes import * +from OneShotCfg import OneShotCfg +from utils import * + + +# helper function for printing messages +def _print_msg_list(msg_list_name, msg_list): + md_results = "" + if msg_list: + md_results += "### %s\n" % msg_list_name + md_results += "```\n" + for msg in msg_list: + msg_parts = msg.split() + md_results += msg + "\n\n" + md_results += "```\n" + return md_results + + +class LintCfg(OneShotCfg): + """Derivative class for linting purposes. + """ + def __init__(self, flow_cfg_file, proj_root, args): + super().__init__(flow_cfg_file, proj_root, args) + + def __post_init__(self): + super().__post_init__() + # Set the title for lint results. 
+ self.results_title = self.name.upper() + " Lint Results" + + @staticmethod + def create_instance(flow_cfg_file, proj_root, args): + '''Create a new instance of this class as with given parameters. + ''' + return LintCfg(flow_cfg_file, proj_root, args) + + def gen_results_summary(self): + ''' + Gathers the aggregated results from all sub configs + ''' + + # Generate results table for runs. Note that we build a simple md and + # a marked up md version here in parallel. + log.info("Create summary of lint results") + + results_str = "## " + self.results_title + " (Summary)\n\n" + results_str += "### " + self.timestamp_long + "\n\n" + + header = [ + "Name", "Tool Warnings", "Tool Errors", "Lint Warnings", + "Lint Errors" + ] + colalign = ("center", ) * len(header) + table = [header] + + for cfg in self.cfgs: + + results_page = cfg.results_server_dir + '/results.html' + results_page_url = results_page.replace( + cfg.results_server_prefix, cfg.results_server_url_prefix) + name_with_link = "[" + cfg.name.upper( + ) + "](" + results_page_url + ")" + table.append([ + name_with_link, + str(len(cfg.result_summary["warnings"])) + " W", + str(len(cfg.result_summary["errors"])) + " E", + str(len(cfg.result_summary["lint_warnings"])) + " W", + str(len(cfg.result_summary["lint_errors"])) + " E" + ]) + + if len(table) > 1: + self.results_summary_md = results_str + tabulate( + table, headers="firstrow", tablefmt="pipe", + colalign=colalign) + "\n" + else: + self.results_summary_md = results_str + "\nNo results to display.\n" + + print(self.results_summary_md) + + # Return only the tables + return self.results_summary_md + + def _gen_results(self): + # ''' + # The function is called after the regression has completed. It looks + # for a regr_results.hjson file with aggregated results from the lint run. 
+ # The hjson needs to have the following (potentially empty) fields + # + # { + # tool: "" + # errors: [] + # warnings: [] + # lint_errors: [] + # lint_warning: [] + # lint_infos: [] + # } + # + # where each entry is a string representing a lint message. This allows + # to reuse the same LintCfg class with different tools since just the + # parsing script that transforms the tool output into the hjson above + # needs to be adapted. + # + # note that if this is a master config, the results will + # be generated using the _gen_results_summary function + # ''' + + # Generate results table for runs. + # Note that we build a simple md and a marked up md version + # here in parallel. + results_str = "## " + self.results_title + "\n\n" + results_str += "### " + self.timestamp_long + "\n" + results_str += "### Lint Tool: " + self.tool.upper() + "\n\n" + + header = [ + "Build Mode", "Tool Warnings", "Tool Errors", "Lint Warnings", + "Lint Errors" + ] + colalign = ("center", ) * len(header) + table = [header] + + # aggregated counts + self.result_summary["warnings"] = [] + self.result_summary["errors"] = [] + self.result_summary["lint_warnings"] = [] + self.result_summary["lint_errors"] = [] + + fail_msgs = "" + for mode in self.build_modes: + + result_data = Path( + subst_wildcards(self.build_dir, {"build_mode": mode.name}) + + '/results.hjson') + log.info("looking for result data file at %s", result_data) + + try: + with open(result_data, "r") as results_file: + self.result = hjson.load(results_file, use_decimal=True) + except IOError as err: + log.warning("%s", err) + self.result = { + "tool": "", + "errors": ["IOError: %s" % err], + "warnings": [], + "lint_errors": [], + "lint_warnings": [], + "lint_infos": [] + } + if self.result: + table.append([ + mode.name, + str(len(self.result["warnings"])) + " W ", + str(len(self.result["errors"])) + " E", + # We currently do not publish these infos at + # the moment len(self.result["lint_infos"]), + 
str(len(self.result["lint_warnings"])) + " W",
+                    str(len(self.result["lint_errors"])) + " E"
+                ])
+            else:
+                self.result = {
+                    "tool": "",
+                    "errors": [],
+                    "warnings": [],
+                    "lint_errors": [],
+                    "lint_warnings": [],
+                    "lint_infos": []
+                }
+
+            self.result_summary["warnings"] += self.result["warnings"]
+            self.result_summary["errors"] += self.result["errors"]
+            self.result_summary["lint_warnings"] += self.result[
+                "lint_warnings"]
+            self.result_summary["lint_errors"] += self.result["lint_errors"]
+
+            # Append detailed messages if they exist
+            if sum([
+                    len(self.result["warnings"]),
+                    len(self.result["errors"]),
+                    len(self.result["lint_warnings"]),
+                    len(self.result["lint_errors"])
+            ]):
+                fail_msgs += "\n## Errors and Warnings for Build Mode `'" + mode.name + "'`\n"
+                fail_msgs += _print_msg_list("Tool Errors",
+                                             self.result["errors"])
+                fail_msgs += _print_msg_list("Tool Warnings",
+                                             self.result["warnings"])
+                fail_msgs += _print_msg_list("Lint Errors",
+                                             self.result["lint_errors"])
+                fail_msgs += _print_msg_list("Lint Warnings",
+                                             self.result["lint_warnings"])
+                #fail_msgs += _print_msg_list("Lint Infos", results["lint_infos"])
+
+        if len(table) > 1:
+            self.results_md = results_str + tabulate(
+                table, headers="firstrow", tablefmt="pipe",
+                colalign=colalign) + "\n" + fail_msgs
+        else:
+            self.results_md = results_str + "\nNo results to display.\n"
+
+        # Write results to the scratch area
+        self.results_file = self.scratch_path + "/results_" + self.timestamp + ".md"
+        with open(self.results_file, 'w') as f:
+            f.write(self.results_md)
+
+        log.info("[results page]: [%s] [%s]", self.name, self.results_file)
+        return self.results_md
diff --git a/vendor/lowrisc_ip/dvsim/Modes.py b/vendor/lowrisc_ip/dvsim/Modes.py
new file mode 100644
index 00000000..3a757c97
--- /dev/null
+++ b/vendor/lowrisc_ip/dvsim/Modes.py
@@ -0,0 +1,547 @@
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details.
+# SPDX-License-Identifier: Apache-2.0 +r""" +Classes +""" + +import logging as log +import pprint +import re +import sys + +import hjson + +from utils import * + + +class Modes(): + """ + Abstraction for specifying collection of options called as 'modes'. This is + the base class which is extended for run_modes, build_modes, tests and regressions. + """ + def self_str(self): + ''' + This is used to construct the string representation of the entire class object. + ''' + tname = "" + if self.type != "": tname = self.type + "_" + if self.mname != "": tname += self.mname + if log.getLogger().isEnabledFor(VERBOSE): + return "\n<---" + tname + ":\n" + pprint.pformat(self.__dict__) + \ + "\n--->\n" + else: + return tname + ":" + self.name + + def __str__(self): + return self.self_str() + + def __repr__(self): + return self.self_str() + + def __init__(self, mdict): + keys = mdict.keys() + attrs = self.__dict__.keys() + + if not 'name' in keys: + log.error("Key \"name\" missing in mode %s", mdict) + sys.exit(1) + + if not hasattr(self, "type"): + log.fatal("Key \"type\" is missing or invalid") + sys.exit(1) + + if not hasattr(self, "mname"): self.mname = "" + + for key in keys: + if key not in attrs: + log.error("Key %s in %s is invalid", key, mdict) + sys.exit(1) + setattr(self, key, mdict[key]) + + def get_sub_modes(self): + sub_modes = [] + if hasattr(self, "en_" + self.type + "_modes"): + sub_modes = getattr(self, "en_" + self.type + "_modes") + return sub_modes + + def set_sub_modes(self, sub_modes): + setattr(self, "en_" + self.type + "_modes", sub_modes) + + def merge_mode(self, mode): + ''' + Merge a new mode with self. + Merge sub mode specified with 'en_*_modes with self. 
+ ''' + + sub_modes = self.get_sub_modes() + is_sub_mode = mode.name in sub_modes + + if not mode.name == self.name and not is_sub_mode: + return False + + # only merge the lists; if strs are different, then throw an error + attrs = self.__dict__.keys() + for attr in attrs: + # merge lists together + self_attr_val = getattr(self, attr) + mode_attr_val = getattr(mode, attr) + + if type(self_attr_val) is list: + self_attr_val.extend(mode_attr_val) + setattr(self, attr, self_attr_val) + + elif not is_sub_mode or attr not in ["name", "mname"]: + self.check_conflict(mode.name, attr, mode_attr_val) + + # Check newly appended sub_modes, remove 'self' and duplicates + sub_modes = self.get_sub_modes() + + if sub_modes != []: + new_sub_modes = [] + for sub_mode in sub_modes: + if not self.name == sub_mode and not sub_mode in new_sub_modes: + new_sub_modes.append(sub_mode) + self.set_sub_modes(new_sub_modes) + return True + + def check_conflict(self, name, attr, mode_attr_val): + self_attr_val = getattr(self, attr) + if self_attr_val == mode_attr_val: return + + default_val = None + if type(self_attr_val) is int: + default_val = -1 + elif type(self_attr_val) is str: + default_val = "" + + if self_attr_val != default_val and mode_attr_val != default_val: + log.error( + "mode %s cannot be merged into %s due to conflicting %s {%s, %s}", + name, self.name, attr, str(self_attr_val), str(mode_attr_val)) + sys.exit(1) + elif self_attr_val == default_val: + self_attr_val = mode_attr_val + setattr(self, attr, self_attr_val) + + @staticmethod + def create_modes(ModeType, mdicts): + ''' + Create modes of type ModeType from a given list of raw dicts + Process dependencies. + Return a list of modes objects. + ''' + def merge_sub_modes(mode, parent, objs): + # Check if there are modes available to merge + sub_modes = mode.get_sub_modes() + if sub_modes == []: return + + # Set parent if it is None. 
If not, check cyclic dependency + if parent is None: + parent = mode + else: + if mode.name == parent.name: + log.error("Cyclic dependency when processing mode \"%s\"", + mode.name) + sys.exit(1) + + for sub_mode in sub_modes: + # Find the sub_mode obj from str + found = False + for obj in objs: + if sub_mode == obj.name: + # First recursively merge the sub_modes + merge_sub_modes(obj, parent, objs) + + # Now merge the sub mode with mode + mode.merge_mode(obj) + found = True + break + if not found: + log.error( + "Sub mode \"%s\" added to mode \"%s\" was not found!", + sub_mode, mode.name) + sys.exit(1) + + modes_objs = [] + # create a default mode if available + default_mode = ModeType.get_default_mode() + if default_mode is not None: modes_objs.append(default_mode) + + # Process list of raw dicts that represent the modes + # Pass 1: Create unique set of modes by merging modes with the same name + for mdict in mdicts: + # Create a new item + new_mode_merged = False + new_mode = ModeType(mdict) + for mode in modes_objs: + # Merge new one with existing if available + if mode.name == new_mode.name: + mode.merge_mode(new_mode) + new_mode_merged = True + break + + # Add the new mode to the list if not already appended + if not new_mode_merged: + modes_objs.append(new_mode) + ModeType.item_names.append(new_mode.name) + + # Pass 2: Recursively expand sub modes within parent modes + for mode in modes_objs: + merge_sub_modes(mode, None, modes_objs) + + # Return the list of objects + return modes_objs + + @staticmethod + def get_default_mode(ModeType): + return None + + @staticmethod + def find_mode(mode_name, modes): + ''' + Given a mode_name in string, go through list of modes and return the mode with + the string that matches. Thrown an error and return None if nothing was found. 
+ ''' + found = False + for mode in modes: + if mode_name == mode.name: + return mode + return None + + @staticmethod + def find_and_merge_modes(mode, mode_names, modes, merge_modes=True): + ''' + ''' + found_mode_objs = [] + for mode_name in mode_names: + sub_mode = Modes.find_mode(mode_name, modes) + if sub_mode is not None: + found_mode_objs.append(sub_mode) + if merge_modes is True: mode.merge_mode(sub_mode) + else: + log.error("Mode \"%s\" enabled within mode \"%s\" not found!", + mode_name, mode.name) + sys.exit(1) + return found_mode_objs + + +class BuildModes(Modes): + """ + Build modes. + """ + + # Maintain a list of build_modes str + item_names = [] + + def __init__(self, bdict): + self.name = "" + self.type = "build" + if not hasattr(self, "mname"): self.mname = "mode" + self.is_sim_mode = 0 + self.build_opts = [] + self.run_opts = [] + self.en_build_modes = [] + + super().__init__(bdict) + self.en_build_modes = list(set(self.en_build_modes)) + + @staticmethod + def get_default_mode(): + return BuildModes({"name": "default"}) + + +class RunModes(Modes): + """ + Run modes. + """ + + # Maintain a list of run_modes str + item_names = [] + + def __init__(self, rdict): + self.name = "" + self.type = "run" + if not hasattr(self, "mname"): self.mname = "mode" + self.reseed = -1 + self.run_opts = [] + self.uvm_test = "" + self.uvm_test_seq = "" + self.build_mode = "" + self.en_run_modes = [] + self.sw_dir = "" + self.sw_name = "" + self.sw_build_device = "" + + super().__init__(rdict) + self.en_run_modes = list(set(self.en_run_modes)) + + @staticmethod + def get_default_mode(): + return None + + +class Tests(RunModes): + """ + Abstraction for tests. The RunModes abstraction can be reused here with a few + modifications. 
+ """ + + # Maintain a list of tests str + item_names = [] + + # TODO: This info should be passed via hjson + defaults = { + "reseed": -1, + "uvm_test": "", + "uvm_test_seq": "", + "build_mode": "", + "sw_dir": "", + "sw_name": "", + "sw_build_device": "", + } + + def __init__(self, tdict): + if not hasattr(self, "mname"): self.mname = "test" + super().__init__(tdict) + + @staticmethod + def create_tests(tdicts, sim_cfg): + ''' + Create Tests from a given list of raw dicts. + TODO: enhance the raw dict to include file scoped defaults. + Process enabled run modes and the set build mode. + Return a list of test objects. + ''' + def get_pruned_en_run_modes(test_en_run_modes, global_en_run_modes): + pruned_en_run_modes = [] + for test_en_run_mode in test_en_run_modes: + if test_en_run_mode not in global_en_run_modes: + pruned_en_run_modes.append(test_en_run_mode) + return pruned_en_run_modes + + tests_objs = [] + # Pass 1: Create unique set of tests by merging tests with the same name + for tdict in tdicts: + # Create a new item + new_test_merged = False + new_test = Tests(tdict) + for test in tests_objs: + # Merge new one with existing if available + if test.name == new_test.name: + test.merge_mode(new_test) + new_test_merged = True + break + + # Add the new test to the list if not already appended + if not new_test_merged: + tests_objs.append(new_test) + Tests.item_names.append(new_test.name) + + # Pass 2: Process dependencies + build_modes = [] + if hasattr(sim_cfg, "build_modes"): + build_modes = getattr(sim_cfg, "build_modes") + + run_modes = [] + if hasattr(sim_cfg, "run_modes"): + run_modes = getattr(sim_cfg, "run_modes") + + attrs = Tests.defaults + for test_obj in tests_objs: + # Unpack run_modes first + en_run_modes = get_pruned_en_run_modes(test_obj.en_run_modes, + sim_cfg.en_run_modes) + Modes.find_and_merge_modes(test_obj, en_run_modes, run_modes) + + # Find and set the missing attributes from sim_cfg + # If not found in sim_cfg either, then throw a 
warning + # TODO: These should be file-scoped + for attr in attrs.keys(): + # Check if attr value is default + val = getattr(test_obj, attr) + default_val = attrs[attr] + if val == default_val: + global_val = None + # Check if we can find a default in sim_cfg + if hasattr(sim_cfg, attr): + global_val = getattr(sim_cfg, attr) + + if global_val is not None and global_val != default_val: + setattr(test_obj, attr, global_val) + + # Unpack the build mode for this test + build_mode_objs = Modes.find_and_merge_modes(test_obj, + [test_obj.build_mode], + build_modes, + merge_modes=False) + test_obj.build_mode = build_mode_objs[0] + + # Error if set build mode is actually a sim mode + if test_obj.build_mode.is_sim_mode is True: + log.error( + "Test \"%s\" uses build_mode %s which is actually a sim mode", + test_obj.name, test_obj.build_mode.name) + sys.exit(1) + + # Merge build_mode's run_opts with self + test_obj.run_opts.extend(test_obj.build_mode.run_opts) + + # Return the list of tests + return tests_objs + + @staticmethod + def merge_global_opts(tests, global_build_opts, global_run_opts): + processed_build_modes = [] + for test in tests: + if test.build_mode.name not in processed_build_modes: + test.build_mode.build_opts.extend(global_build_opts) + processed_build_modes.append(test.build_mode.name) + test.run_opts.extend(global_run_opts) + + +class Regressions(Modes): + """ + Abstraction for test sets / regression sets. 
+ """ + + # Maintain a list of tests str + item_names = [] + + # TODO: define __repr__ and __str__ to print list of tests if VERBOSE + + def __init__(self, regdict): + self.name = "" + self.type = "" + if not hasattr(self, "mname"): self.mname = "regression" + self.tests = [] + self.reseed = -1 + self.test_names = [] + self.excl_tests = [] # TODO: add support for this + self.en_sim_modes = [] + self.en_run_modes = [] + self.build_opts = [] + self.run_opts = [] + super().__init__(regdict) + + @staticmethod + def create_regressions(regdicts, sim_cfg, tests): + ''' + Create Test sets from a given list of raw dicts. + Return a list of test set objects. + ''' + + regressions_objs = [] + # Pass 1: Create unique set of test sets by merging test sets with the same name + for regdict in regdicts: + # Create a new item + new_regression_merged = False + new_regression = Regressions(regdict) + + # Check for name conflicts with tests before merging + if new_regression.name in Tests.item_names: + log.error("Test names and regression names are required to be unique. " + \ + "The regression \"%s\" bears the same name with an existing test. 
", + new_regression.name) + sys.exit(1) + + for regression in regressions_objs: + # Merge new one with existing if available + if regression.name == new_regression.name: + regression.merge_mode(new_regression) + new_regression_merged = True + break + + # Add the new test to the list if not already appended + if not new_regression_merged: + regressions_objs.append(new_regression) + Regressions.item_names.append(new_regression.name) + + # Pass 2: Process dependencies + build_modes = [] + if hasattr(sim_cfg, "build_modes"): + build_modes = getattr(sim_cfg, "build_modes") + + run_modes = [] + if hasattr(sim_cfg, "run_modes"): + run_modes = getattr(sim_cfg, "run_modes") + + for regression_obj in regressions_objs: + # Unpack the sim modes + found_sim_mode_objs = Modes.find_and_merge_modes( + regression_obj, regression_obj.en_sim_modes, build_modes, + False) + + for sim_mode_obj in found_sim_mode_objs: + if sim_mode_obj.is_sim_mode == 0: + log.error( + "Enabled mode \"%s\" within the regression \"%s\" is not a sim mode", + sim_mode_obj.name, regression_obj.name) + sys.exit(1) + + # Check if sim_mode_obj's sub-modes are a part of regressions's + # sim modes- if yes, then it will cause duplication of opts + # Throw an error and exit. + for sim_mode_obj_sub in sim_mode_obj.en_build_modes: + if sim_mode_obj_sub in regression_obj.en_sim_modes: + log.error("Regression \"%s\" enables sim_modes \"%s\" and \"%s\". 
" + \ + "The former is already a sub_mode of the latter.", + regression_obj.name, sim_mode_obj_sub, sim_mode_obj.name) + sys.exit(1) + + # Check if sim_mode_obj is also passed on the command line, in + # which case, skip + if sim_mode_obj.name in sim_cfg.en_build_modes: + continue + + # Merge the build and run opts from the sim modes + regression_obj.build_opts.extend(sim_mode_obj.build_opts) + regression_obj.run_opts.extend(sim_mode_obj.run_opts) + + # Unpack the run_modes + # TODO: If there are other params other than run_opts throw an error and exit + found_run_mode_objs = Modes.find_and_merge_modes( + regression_obj, regression_obj.en_run_modes, run_modes, False) + + # Only merge the run_opts from the run_modes enabled + for run_mode_obj in found_run_mode_objs: + # Check if run_mode_obj is also passed on the command line, in + # which case, skip + if run_mode_obj.name in sim_cfg.en_run_modes: + continue + self.run_opts.extend(run_mode_obj.run_opts) + + # Unpack tests + if regression_obj.tests == []: + log.log(VERBOSE, + "Unpacking all tests in scope for regression \"%s\"", + regression_obj.name) + regression_obj.tests = sim_cfg.tests + regression_obj.test_names = Tests.item_names + + else: + tests_objs = [] + regression_obj.test_names = regression_obj.tests + for test in regression_obj.tests: + test_obj = Modes.find_mode(test, sim_cfg.tests) + if test_obj is None: + log.error( + "Test \"%s\" added to regression \"%s\" not found!", + test, regression_obj.name) + tests_objs.append(test_obj) + regression_obj.tests = tests_objs + + # Return the list of tests + return regressions_objs + + def merge_regression_opts(self): + processed_build_modes = [] + for test in self.tests: + if test.build_mode.name not in processed_build_modes: + test.build_mode.build_opts.extend(self.build_opts) + processed_build_modes.append(test.build_mode.name) + test.run_opts.extend(self.run_opts) + + # Override reseed if available. 
+ if self.reseed != -1: + test.reseed = self.reseed diff --git a/vendor/lowrisc_ip/dvsim/OneShotCfg.py b/vendor/lowrisc_ip/dvsim/OneShotCfg.py new file mode 100644 index 00000000..1b01bdb6 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/OneShotCfg.py @@ -0,0 +1,188 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r""" +Class describing a one-shot build configuration object +""" + +import logging as log +import sys +from collections import OrderedDict + +from Deploy import * +from FlowCfg import FlowCfg +from Modes import * +from utils import * + + +class OneShotCfg(FlowCfg): + """Simple one-shot build flow for non-simulation targets like + linting, synthesis and FPV. + """ + def __init__(self, flow_cfg_file, proj_root, args): + super().__init__(flow_cfg_file, proj_root, args) + + # Options set from command line + self.tool = args.tool + self.email = args.email + self.verbose = args.verbose + self.build_cmd = "" + self.build_opts = [] + self.report_cmd = "" + self.report_opts = [] + self.build_opts.extend(args.build_opts) + self.build_unique = args.build_unique + self.build_only = args.build_only + + # Options built from cfg_file files + self.project = "" + self.flow = "" + self.flow_makefile = "" + self.scratch_path = "" + self.build_dir = "" + self.run_dir = "" + self.sw_build_dir = "" + self.pass_patterns = [] + self.fail_patterns = [] + self.name = "" + self.dut = "" + self.fusesoc_core = "" + self.ral_spec = "" + self.build_modes = [] + self.run_modes = [] + self.regressions = [] + + # Flow results + self.result = OrderedDict() + self.result_summary = OrderedDict() + + self.dry_run = args.dry_run + + # Not needed for this build + self.verbosity = "" + self.en_build_modes = [] + + # Generated data structures + self.links = {} + self.build_list = [] + self.deploy = [] + + # Parse the cfg_file file tree + self.parse_flow_cfg(flow_cfg_file) + 
self._post_parse_flow_cfg() + + # If build_unique is set, then add current timestamp to uniquify it + if self.build_unique: + self.build_dir += "_" + self.timestamp + + # Process overrides before substituting the wildcards. + self._process_overrides() + + # Make substitutions, while ignoring the following wildcards + # TODO: Find a way to set these in sim cfg instead + ignored_wildcards = ["build_mode", "index", "test"] + self.__dict__ = find_and_substitute_wildcards(self.__dict__, + self.__dict__, + ignored_wildcards) + + # Stuff below only pertains to individual cfg (not master cfg). + if not self.is_master_cfg: + # Print info + log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path) + + # Set directories with links for ease of debug / triage. + self.links = { + "D": self.scratch_path + "/" + "dispatched", + "P": self.scratch_path + "/" + "passed", + "F": self.scratch_path + "/" + "failed", + "K": self.scratch_path + "/" + "killed" + } + + # Use the default build mode for tests that do not specify it + if not hasattr(self, "build_mode"): + setattr(self, "build_mode", "default") + + self._process_exports() + + # Create objects from raw dicts - build_modes, sim_modes, run_modes, + # tests and regressions, only if not a master cfg obj + self._create_objects() + + # Post init checks + self.__post_init__() + + def __post_init__(self): + # Run some post init checks + super().__post_init__() + + @staticmethod + def create_instance(flow_cfg_file, proj_root, args): + '''Create a new instance of this class as with given parameters. + ''' + return OneShotCfg(flow_cfg_file, proj_root, args) + + # Purge the output directories. This operates on self. 
+ def _purge(self): + if self.scratch_path: + try: + log.info("Purging scratch path %s", self.scratch_path) + os.system("/bin/rm -rf " + self.scratch_path) + except IOError: + log.error('Failed to purge scratch directory %s', + self.scratch_path) + + def _create_objects(self): + # Create build and run modes objects + build_modes = Modes.create_modes(BuildModes, + getattr(self, "build_modes")) + setattr(self, "build_modes", build_modes) + + # All defined build modes are being built, h + # ence extend all with the global opts. + for build_mode in build_modes: + build_mode.build_opts.extend(self.build_opts) + + def _print_list(self): + for list_item in self.list_items: + log.info("---- List of %s in %s ----", list_item, self.name) + if hasattr(self, list_item): + items = getattr(self, list_item) + for item in items: + log.info(item) + else: + log.error("Item %s does not exist!", list_item) + + def _create_dirs(self): + '''Create initial set of directories + ''' + # Invoking system calls has a performance penalty. + # Construct a single command line chained with '&&' to invoke + # the system call only once, rather than multiple times. + create_link_dirs_cmd = "" + for link in self.links.keys(): + create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && " + create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && " + create_link_dirs_cmd += " true" + + try: + os.system(create_link_dirs_cmd) + except IOError: + log.error("Error running when running the cmd \"%s\"", + create_link_dirs_cmd) + sys.exit(1) + + def _create_deploy_objects(self): + '''Create deploy objects from build modes + ''' + builds = [] + build_map = {} + for build in self.build_modes: + item = CompileOneShot(build, self) + builds.append(item) + build_map[build] = item + + self.builds = builds + self.deploy = builds + + # Create initial set of directories before kicking off the regression. 
+ self._create_dirs() diff --git a/vendor/lowrisc_ip/dvsim/SimCfg.py b/vendor/lowrisc_ip/dvsim/SimCfg.py new file mode 100644 index 00000000..3a3219e6 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/SimCfg.py @@ -0,0 +1,570 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r""" +Class describing simulation configuration object +""" + +import logging as log +import sys +from collections import OrderedDict + +from Deploy import * +from FlowCfg import FlowCfg +from Modes import * +from testplanner import class_defs, testplan_utils +from utils import * + + +class SimCfg(FlowCfg): + """Simulation configuration object + + A simulation configuration class holds key information required for building a DV + regression framework. + """ + def __init__(self, flow_cfg_file, proj_root, args): + super().__init__(flow_cfg_file, proj_root, args) + # Options set from command line + self.tool = args.tool + self.build_opts = [] + self.build_opts.extend(args.build_opts) + self.en_build_modes = [] + self.en_build_modes.extend(args.build_modes) + self.run_opts = [] + self.run_opts.extend(args.run_opts) + self.en_run_modes = [] + self.en_run_modes.extend(args.run_modes) + self.build_unique = args.build_unique + self.build_only = args.build_only + self.run_only = args.run_only + self.reseed_ovrd = args.reseed + self.reseed_multiplier = args.reseed_multiplier + self.waves = args.waves + self.dump = args.dump + self.max_waves = args.max_waves + self.cov = args.cov + self.cov_merge_previous = args.cov_merge_previous + self.profile = args.profile + self.xprop_off = args.xprop_off + self.no_rerun = args.no_rerun + self.verbosity = "{" + args.verbosity + "}" + self.email = args.email + self.verbose = args.verbose + self.dry_run = args.dry_run + self.skip_ral = args.skip_ral + self.map_full_testplan = args.map_full_testplan + + # Set default sim modes for unpacking + if self.waves is True: 
self.en_build_modes.append("waves") + if self.cov is True: self.en_build_modes.append("cov") + if self.profile != 'none': self.en_build_modes.append("profile") + if self.xprop_off is not True: self.en_build_modes.append("xprop") + + # Options built from cfg_file files + self.project = "" + self.flow = "" + self.flow_makefile = "" + self.build_dir = "" + self.run_dir = "" + self.sw_build_dir = "" + self.pass_patterns = [] + self.fail_patterns = [] + self.name = "" + self.dut = "" + self.tb = "" + self.testplan = "" + self.fusesoc_core = "" + self.ral_spec = "" + self.build_modes = [] + self.run_modes = [] + self.regressions = [] + + # Options from tools - for building and running tests + self.build_cmd = "" + self.flist_gen_cmd = "" + self.flist_gen_opts = [] + self.flist_file = "" + self.run_cmd = "" + self.dump_file = "" + + # Generated data structures + self.links = {} + self.build_list = [] + self.run_list = [] + self.deploy = [] + self.cov_merge_deploy = None + self.cov_report_deploy = None + self.results_summary = OrderedDict() + + # If is_master_cfg is set, then each cfg will have its own cov_deploy. + # Maintain an array of those in cov_deploys. + self.cov_deploys = [] + + # Parse the cfg_file file tree + self.parse_flow_cfg(flow_cfg_file) + self._post_parse_flow_cfg() + + # If build_unique is set, then add current timestamp to uniquify it + if self.build_unique: + self.build_dir += "_" + self.timestamp + + # Process overrides before substituting the wildcards. + self._process_overrides() + + # Make substitutions, while ignoring the following wildcards + # TODO: Find a way to set these in sim cfg instead + ignored_wildcards = [ + "build_mode", "index", "test", "seed", "uvm_test", "uvm_test_seq", + "cov_db_dirs", "sw_dir", "sw_name", "sw_build_device" + ] + self.__dict__ = find_and_substitute_wildcards(self.__dict__, + self.__dict__, + ignored_wildcards, + self.is_master_cfg) + + # Set the title for simulation results. 
+ self.results_title = self.name.upper() + " Simulation Results" + + # Stuff below only pertains to individual cfg (not master cfg). + if not self.is_master_cfg: + # Print info + log.info("[scratch_dir]: [%s]: [%s]", self.name, self.scratch_path) + + # Set directories with links for ease of debug / triage. + self.links = { + "D": self.scratch_path + "/" + "dispatched", + "P": self.scratch_path + "/" + "passed", + "F": self.scratch_path + "/" + "failed", + "K": self.scratch_path + "/" + "killed" + } + + # Use the default build mode for tests that do not specify it + if not hasattr(self, "build_mode"): + setattr(self, "build_mode", "default") + + self._process_exports() + + # Create objects from raw dicts - build_modes, sim_modes, run_modes, + # tests and regressions, only if not a master cfg obj + # TODO: hack to prevent coverage collection if tool != vcs + if self.cov and self.tool != "vcs": + self.cov = False + log.warning( + "Coverage collection with tool \"%s\" is not supported yet", + self.tool) + self._create_objects() + + # Post init checks + self.__post_init__() + + def __post_init__(self): + # Run some post init checks + super().__post_init__() + + @staticmethod + def create_instance(flow_cfg_file, proj_root, args): + '''Create a new instance of this class as with given parameters. + ''' + return SimCfg(flow_cfg_file, proj_root, args) + + # Purge the output directories. This operates on self. 
+ def _purge(self): + if self.scratch_path: + try: + log.info("Purging scratch path %s", self.scratch_path) + os.system("/bin/rm -rf " + self.scratch_path) + except IOError: + log.error('Failed to purge scratch directory %s', + self.scratch_path) + + def _create_objects(self): + # Create build and run modes objects + build_modes = Modes.create_modes(BuildModes, + getattr(self, "build_modes")) + setattr(self, "build_modes", build_modes) + + run_modes = Modes.create_modes(RunModes, getattr(self, "run_modes")) + setattr(self, "run_modes", run_modes) + + # Walk through build modes enabled on the CLI and append the opts + for en_build_mode in self.en_build_modes: + build_mode_obj = Modes.find_mode(en_build_mode, build_modes) + if build_mode_obj is not None: + self.build_opts.extend(build_mode_obj.build_opts) + self.run_opts.extend(build_mode_obj.run_opts) + else: + log.error( + "Mode \"%s\" enabled on the the command line is not defined", + en_build_mode) + sys.exit(1) + + # Walk through run modes enabled on the CLI and append the opts + for en_run_mode in self.en_run_modes: + run_mode_obj = Modes.find_mode(en_run_mode, run_modes) + if run_mode_obj is not None: + self.run_opts.extend(run_mode_obj.run_opts) + else: + log.error( + "Mode \"%s\" enabled on the the command line is not defined", + en_run_mode) + sys.exit(1) + + # Create tests from given list of items + tests = Tests.create_tests(getattr(self, "tests"), self) + setattr(self, "tests", tests) + + # Regressions + # Parse testplan if provided. + if self.testplan != "": + self.testplan = testplan_utils.parse_testplan(self.testplan) + # Extract tests in each milestone and add them as regression target. 
+ self.regressions.extend(self.testplan.get_milestone_regressions()) + + # Create regressions + regressions = Regressions.create_regressions( + getattr(self, "regressions"), self, tests) + setattr(self, "regressions", regressions) + + def _print_list(self): + for list_item in self.list_items: + log.info("---- List of %s in %s ----", list_item, self.name) + if hasattr(self, list_item): + items = getattr(self, list_item) + for item in items: + log.info(item) + else: + log.error("Item %s does not exist!", list_item) + + def _create_build_and_run_list(self): + # Walk through the list of items to run and create the build and run + # objects. + # Allow multiple regressions to run as long as the do not enable + # sim_modes or run_modes + def get_overlapping_tests(tests, run_list_names): + overlapping_tests = [] + for test in tests: + if test.name in run_list_names: + overlapping_tests.append(test) + return overlapping_tests + + def prune_items(items, marked_items): + pruned_items = [] + for item in items: + if item not in marked_items: pruned_items.append(item) + return pruned_items + + # Check if there are items to run + if self.items == []: + log.error( + "No items provided for running this simulation / regression") + sys.exit(1) + + items_list = self.items + run_list_names = [] + marked_items = [] + # Process regressions first + for regression in self.regressions: + if regression.name in items_list: + overlapping_tests = get_overlapping_tests( + regression.tests, run_list_names) + if overlapping_tests != []: + log.error( + "Regression \"%s\" added for run contains tests that overlap with " + "other regressions added. 
This can result in conflicting " + "build / run_opts to be set causing unexpected results.", + regression.name) + sys.exit(1) + + self.run_list.extend(regression.tests) + # Merge regression's build and run opts with its tests and their + # build_modes + regression.merge_regression_opts() + run_list_names.extend(regression.test_names) + marked_items.append(regression.name) + items_list = prune_items(items_list, marked_items) + + # Process individual tests + for test in self.tests: + if test.name in items_list: + overlapping_tests = get_overlapping_tests([test], + run_list_names) + if overlapping_tests == []: + self.run_list.append(test) + run_list_names.append(test.name) + marked_items.append(test.name) + items_list = prune_items(items_list, marked_items) + + # Merge the global build and run opts + Tests.merge_global_opts(self.run_list, self.build_opts, self.run_opts) + + # Check if all items have been processed + if items_list != []: + log.error( + "The items %s added for run were not found in \n%s!\n " + "Use the --list switch to see a list of available " + "tests / regressions.", items_list, self.flow_cfg_file) + + # Process reseed override and create the build_list + build_list_names = [] + for test in self.run_list: + # Override reseed if available. + if self.reseed_ovrd != -1: + test.reseed = self.reseed_ovrd + + # Apply reseed multiplier if set on the command line. + test.reseed *= self.reseed_multiplier + + # Create the unique set of builds needed. + if test.build_mode.name not in build_list_names: + self.build_list.append(test.build_mode) + build_list_names.append(test.build_mode.name) + + def _create_dirs(self): + '''Create initial set of directories + ''' + # Invoking system calls has a performance penalty. + # Construct a single command line chained with '&&' to invoke + # the system call only once, rather than multiple times. 
+ create_link_dirs_cmd = "" + for link in self.links.keys(): + create_link_dirs_cmd += "/bin/rm -rf " + self.links[link] + " && " + create_link_dirs_cmd += "mkdir -p " + self.links[link] + " && " + create_link_dirs_cmd += " true" + + try: + os.system(create_link_dirs_cmd) + except IOError: + log.error("Error running when running the cmd \"%s\"", + create_link_dirs_cmd) + sys.exit(1) + + def _create_deploy_objects(self): + '''Create deploy objects from the build and run lists. + ''' + + # Create the build and run list first + self._create_build_and_run_list() + + builds = [] + build_map = {} + for build in self.build_list: + item = CompileSim(build, self) + builds.append(item) + build_map[build] = item + + runs = [] + for test in self.run_list: + for num in range(test.reseed): + item = RunTest(num, test, self) + if self.build_only is False: + build_map[test.build_mode].sub.append(item) + runs.append(item) + + self.builds = builds + self.runs = runs + if self.run_only is True: + self.deploy = runs + else: + self.deploy = builds + + # Create cov_merge and cov_report objects + if self.cov: + self.cov_merge_deploy = CovMerge(self) + self.cov_report_deploy = CovReport(self) + # Generate reports only if merge was successful; add it as a dependency + # of merge. + self.cov_merge_deploy.sub.append(self.cov_report_deploy) + + # Create initial set of directories before kicking off the regression. + self._create_dirs() + + def create_deploy_objects(self): + '''Public facing API for _create_deploy_objects(). + ''' + super().create_deploy_objects() + + # Also, create cov_deploys + if self.cov: + for item in self.cfgs: + if item.cov: + self.cov_deploys.append(item.cov_merge_deploy) + + # deploy additional commands as needed. We do this separated for coverage + # since that needs to happen at the end. + def deploy_objects(self): + '''This is a public facing API, so we use "self.cfgs" instead of self. + ''' + # Invoke the base class method to run the regression. 
+ super().deploy_objects() + + # If coverage is enabled, then deploy the coverage tasks. + if self.cov: + Deploy.deploy(self.cov_deploys) + + def _cov_analyze(self): + '''Use the last regression coverage data to open up the GUI tool to + analyze the coverage. + ''' + cov_analyze_deploy = CovAnalyze(self) + try: + proc = subprocess.Popen(args=cov_analyze_deploy.cmd, + shell=True, + close_fds=True) + except Exception as e: + log.fatal("Failed to run coverage analysis cmd:\n\"%s\"\n%s", + cov_analyze_deploy.cmd, e) + sys.exit(1) + + def cov_analyze(self): + '''Public facing API for analyzing coverage. + ''' + for item in self.cfgs: + item._cov_analyze() + + def _gen_results(self): + ''' + The function is called after the regression has completed. It collates the + status of all run targets and generates a dict. It parses the testplan and + maps the generated result to the testplan entries to generate a final table + (list). It also prints the full list of failures for debug / triage. If cov + is enabled, then the summary coverage report is also generated. The final + result is in markdown format. + ''' + + # TODO: add support for html + def retrieve_result(name, results): + for item in results: + if name == item["name"]: return item + return None + + def gen_results_sub(items, results, fail_msgs): + ''' + Generate the results table from the test runs (builds are ignored). + The table has 3 columns - name, passing and total as a list of dicts. + This is populated for all tests. The number of passing and total is + in reference to the number of iterations or reseeds for that test. + This list of dicts is directly consumed by the Testplan::results_table + method for testplan mapping / annotation. + ''' + if items == []: return (results, fail_msgs) + for item in items: + if item.status == "F": + fail_msgs += item.fail_msg + + # Generate results table for runs. 
+ if item.target == "run": + result = retrieve_result(item.name, results) + if result is None: + result = {"name": item.name, "passing": 0, "total": 0} + results.append(result) + if item.status == "P": result["passing"] += 1 + result["total"] += 1 + (results, fail_msgs) = gen_results_sub(item.sub, results, + fail_msgs) + return (results, fail_msgs) + + regr_results = [] + fail_msgs = "" + deployed_items = self.deploy + if self.cov: deployed_items.append(self.cov_merge_deploy) + (regr_results, fail_msgs) = gen_results_sub(deployed_items, + regr_results, fail_msgs) + + # Add title if there are indeed failures + if fail_msgs != "": + fail_msgs = "\n## List of Failures\n" + fail_msgs + self.errors_seen = True + + # Generate results table for runs. + results_str = "## " + self.results_title + "\n" + results_str += "### " + self.timestamp_long + "\n" + + # Add path to testplan. + testplan = "https://" + self.doc_server + '/' + self.rel_path + testplan = testplan.replace("/dv", "/doc/dv_plan/#testplan") + results_str += "### [Testplan](" + testplan + ")\n" + results_str += "### Simulator: " + self.tool.upper() + "\n\n" + + if regr_results == []: + results_str += "No results to display.\n" + + else: + # TODO: check if testplan is not null? + # Map regr results to the testplan entries. + results_str += self.testplan.results_table( + regr_results=regr_results, + map_full_testplan=self.map_full_testplan) + results_str += "\n" + self.results_summary = self.testplan.results_summary + + # Append coverage results of coverage was enabled. 
+ if self.cov and self.cov_report_deploy.status == "P": + results_str += "\n## Coverage Results\n" + results_str += "\n### [Coverage Dashboard](cov_report/dashboard.html)\n\n" + results_str += self.cov_report_deploy.cov_results + self.results_summary[ + "Coverage"] = self.cov_report_deploy.cov_total + else: + self.results_summary["Coverage"] = "--" + + # append link of detail result to block name + self.results_summary["Name"] = self._get_results_page_link( + self.results_summary["Name"]) + + # Append failures for triage + self.results_md = results_str + fail_msgs + results_str += fail_msgs + + # Write results to the scratch area + results_file = self.scratch_path + "/results_" + self.timestamp + ".md" + f = open(results_file, 'w') + f.write(self.results_md) + f.close() + + # Return only the tables + log.info("[results page]: [%s] [%s]", self.name, results_file) + return results_str + + def gen_results_summary(self): + + # sim summary result has 5 columns from each SimCfg.results_summary + header = ["Name", "Passing", "Total", "Pass Rate"] + if self.cov: header.append('Coverage') + table = [header] + colalign = ("center", ) * len(header) + for item in self.cfgs: + row = [] + for title in item.results_summary: + row.append(item.results_summary[title]) + if row == []: continue + table.append(row) + self.results_summary_md = "## " + self.results_title + " (Summary)\n" + self.results_summary_md += "### " + self.timestamp_long + "\n" + self.results_summary_md += tabulate(table, + headers="firstrow", + tablefmt="pipe", + colalign=colalign) + print(self.results_summary_md) + return self.results_summary_md + + def _publish_results(self): + '''Publish coverage results to the opentitan web server.''' + super()._publish_results() + + if self.cov: + results_server_dir_url = self.results_server_dir.replace( + self.results_server_prefix, self.results_server_url_prefix) + + log.info("Publishing coverage results to %s", + results_server_dir_url) + cmd = self.results_server_cmd + " 
-m cp -R " + \ + self.cov_report_deploy.cov_report_dir + " " + self.results_server_dir + try: + cmd_output = subprocess.run(args=cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + log.log(VERBOSE, cmd_output.stdout.decode("utf-8")) + except Exception as e: + log.error("%s: Failed to publish results:\n\"%s\"", e, + str(cmd)) diff --git a/vendor/lowrisc_ip/dvsim/__init__.py b/vendor/lowrisc_ip/dvsim/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vendor/lowrisc_ip/dvsim/dvsim.py b/vendor/lowrisc_ip/dvsim/dvsim.py new file mode 100755 index 00000000..fe053939 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/dvsim.py @@ -0,0 +1,525 @@ +#!/usr/bin/env python3 +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +""" +dvsim is a command line tool to deploy regressions for design verification. It uses hjson as the +format for specifying what to build and run. It is an end-to-end regression manager that can deploy +multiple builds (where some tests might need different set of compile time options requiring a +uniquely build sim executable) in parallel followed by tests in parallel using the load balancer +of your choice. dvsim is built to be tool-agnostic so that you can easily switch between tools +available at your disposal. dvsim uses fusesoc as the starting step to resolve all inter-package +dependencies and provide us with a filelist that will be consumed by the sim tool. +""" + +import argparse +import datetime +import logging as log +import os +import subprocess +import sys + +import Deploy +import LintCfg +import SimCfg +import utils + +# TODO: add dvsim_cfg.hjson to retrieve this info +version = 0.1 + + +# Function to resolve the scratch root directory among the available options: +# If set on the command line, then use that as a preference. +# Else, check if $SCRATCH_ROOT env variable exists and is a directory. 
+# Else use the default ({cwd}/scratch)
+# Try to create the directory if it does not already exist.
+def resolve_scratch_root(arg_scratch_root):
+    scratch_root = os.environ.get('SCRATCH_ROOT')
+    if not arg_scratch_root:
+        if scratch_root is None:
+            arg_scratch_root = os.getcwd() + "/scratch"
+        else:
+            # Scratch space could be mounted in a filesystem (such as NFS) on a network drive.
+            # If the network is down, it could cause the access check to hang. So run a
+            # simple ls command with a timeout to prevent the hang.
+            (out,
+             status) = utils.run_cmd_with_timeout(cmd="ls -d " + scratch_root,
+                                                  timeout=1,
+                                                  exit_on_failure=0)
+            if status == 0 and out != "":
+                arg_scratch_root = scratch_root
+            else:
+                arg_scratch_root = os.getcwd() + "/scratch"
+                log.warning(
+                    "Env variable $SCRATCH_ROOT=\"{}\" is not accessible.\n"
+                    "Using \"{}\" instead.".format(scratch_root,
+                                                   arg_scratch_root))
+    else:
+        arg_scratch_root = os.path.realpath(arg_scratch_root)
+
+    try:
+        os.system("mkdir -p " + arg_scratch_root)
+    except OSError:
+        log.fatal(
+            "Invalid --scratch-root=\"%s\" switch - failed to create directory!",
+            arg_scratch_root)
+        sys.exit(1)
+    return (arg_scratch_root)
+
+
+# Set and return the current GitHub branch name, unless set on the command line.
+# It runs "git rev-parse --abbrev-ref HEAD". If the command fails, it logs a
+# warning and sets the branch name to "default"
+def resolve_branch(arg_branch):
+    if arg_branch is None or arg_branch == "":
+        result = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
+                                stdout=subprocess.PIPE)
+        arg_branch = result.stdout.decode("utf-8").strip()
+        if arg_branch == "":
+            log.warning(
+                "Failed to find current git branch. 
Setting it to \"default\"") + arg_branch = "default" + return (arg_branch) + + +# Get the project root directory path - this is used to construct the full paths +def get_proj_root(): + cmd = ["git", "rev-parse", "--show-toplevel"] + result = subprocess.run(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + proj_root = result.stdout.decode("utf-8").strip() + if not proj_root: + log.error( + "Attempted to find the root of this GitHub repository by running:\n" + "{}\n" + "But this command has failed:\n" + "{}".format(' '.join(cmd), result.stderr.decode("utf-8"))) + sys.exit(1) + return (proj_root) + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + + parser.add_argument("cfg", + metavar="", + help="""Configuration hjson file.""") + + parser.add_argument("-i", + "--items", + nargs="*", + default=["sanity"], + metavar="regr1, regr2, regr3|test1, test2, test3, ...", + help="""Indicate which regressions or tests to run.""") + + parser.add_argument( + "-l", + "--list", + nargs="+", + default=[], + metavar="build_modes|run_modes|tests|regressions", + help= + """List the available build_modes / run_modes / tests / regressions for use.""" + ) + + parser.add_argument("-t", + "--tool", + default="", + metavar="vcs|xcelium|ascentlint|...", + help="Override the tool that is set in hjson file") + + parser.add_argument( + "-sr", + "--scratch-root", + metavar="path", + help="""root scratch directory path where all build / run drectories go; + by default, the tool will create the {scratch_path} = {scratch_root}/{dut} + directory if it doesn't already exist; under {scratch_path}, there will be + {compile_set} set of directories where all the build outputs go and + {test_name} set of directories where the test outputs go""" + ) + + parser.add_argument("-pr", + "--proj-root", + metavar="path", + help="""Specify the root directory of the project. 
+ If this option is not passed, the tool will assume that this is + a local GitHub repository and will attempt to automatically find + the root directory.""") + + parser.add_argument( + "-br", + "--branch", + default="", + metavar="", + help= + """This variable is used to construct the scratch path directory name. If not + specified, it defaults to the GitHub branch name. The idea is to uniquefy the + scratch paths between different branches.""") + + parser.add_argument( + "-bo", + "--build-opts", + nargs="+", + default=[], + metavar="", + help="""Pass additional build options over the command line; + note that if there are multiple compile sets identified to be built, + these options will be passed on to all of them""") + + parser.add_argument( + "-bm", + "--build-modes", + nargs="+", + default=[], + metavar="", + help="""Set build modes on the command line for all tests run as a part + of the regression.""") + + parser.add_argument( + "-ro", + "--run-opts", + nargs="+", + default=[], + metavar="", + help="""Pass additional run time options over the command line; + these options will be passed on to all tests scheduled to be run""" + ) + + parser.add_argument( + "-rm", + "--run-modes", + nargs="+", + default=[], + metavar="", + help="""Set run modes on the command line for all tests run as a part + of the regression.""") + + parser.add_argument( + "-bu", + "--build-unique", + default=False, + action='store_true', + help= + """By default, under the {scratch} directory, there is a {compile_set} + directory created where the build output goes; this can be + uniquified by appending the current timestamp. 
This is suitable + for the case when a test / regression already running and you want + to run something else from a different terminal without affecting + the previous one""") + + parser.add_argument( + "--build-only", + default=False, + action='store_true', + help="Only build the simulation executables for the givem items.") + + parser.add_argument( + "--run-only", + default=False, + action='store_true', + help="Assume sim exec is available and proceed to run step") + + parser.add_argument( + "-s", + "--seeds", + nargs="+", + default=[], + metavar="seed0 seed1 ...", + help= + """Run tests with a specific seeds. Note that these specific seeds are applied to + items being run in the order they are passed.""") + + parser.add_argument( + "--fixed-seed", + type=int, + default=None, + help= + """Run all items with a fixed seed value. This option enforces --reseed 1.""" + ) + + parser.add_argument( + "-r", + "--reseed", + type=int, + default=-1, + metavar="N", + help="""Repeat tests with N iterations with different seeds""") + + parser.add_argument("-rx", + "--reseed-multiplier", + type=int, + default=1, + metavar="N", + help="""Multiplier for existing reseed values.""") + + parser.add_argument("-w", + "--waves", + default=False, + action='store_true', + help="Enable dumping of waves") + + parser.add_argument("-d", + "--dump", + default="fsdb", + metavar="fsdb|shm", + help="Dump waves in fsdb or shm.") + + parser.add_argument("-mw", + "--max-waves", + type=int, + default=5, + metavar="N", + help="""Enable dumpling of waves for at most N tests; + this includes tests scheduled for run AND automatic rerun""" + ) + + parser.add_argument("-c", + "--cov", + default=False, + action='store_true', + help="turn on coverage collection") + + parser.add_argument( + "--cov-merge-previous", + default=False, + action='store_true', + help="""Applicable when --cov switch is enabled. 
If a previous cov + database directory exists, this switch will cause it to be merged with + the current cov database.""") + + parser.add_argument( + "--cov-analyze", + default=False, + action='store_true', + help="Analyze the coverage from the last regression result.") + + parser.add_argument("-p", + "--profile", + default="none", + metavar="time|mem", + help="Turn on simulation profiling") + + parser.add_argument("--xprop-off", + default=False, + action='store_true', + help="Turn off Xpropagation") + + parser.add_argument("--job-prefix", + default="", + metavar="job-prefix", + help="Job prefix before deploying the tool commands.") + + parser.add_argument("--purge", + default=False, + action='store_true', + help="Clean the scratch directory before running.") + + parser.add_argument( + "-mo", + "--max-odirs", + type=int, + default=5, + metavar="N", + help="""When tests are run, the older runs are backed up. This switch + limits the number of backup directories being maintained.""") + + parser.add_argument( + "--no-rerun", + default=False, + action='store_true', + help= + """By default, failing tests will be automatically be rerun with waves; + this option will prevent the rerun from being triggered""") + + parser.add_argument("--skip-ral", + default=False, + action='store_true', + help="""Skip the ral generation step.""") + + parser.add_argument("-v", + "--verbosity", + default="l", + metavar="n|l|m|h|d", + help="""Set verbosity to none/low/medium/high/debug; + This will override any setting added to any of the hjson files + used for config""") + + parser.add_argument("--email", + nargs="+", + default=[], + metavar="", + help="""email the report to specified addresses""") + + parser.add_argument( + "--verbose", + nargs="?", + default=None, + const="default", + metavar="debug", + help="""Print verbose dvsim tool messages. 
If 'debug' is passed, then the + volume of messages is ven higher.""") + + parser.add_argument("--version", + default=False, + action='store_true', + help="Print version and exit") + + parser.add_argument( + "-n", + "--dry-run", + default=False, + action='store_true', + help= + "Print dvsim tool messages only, without actually running any command") + + parser.add_argument( + "--map-full-testplan", + default=False, + action='store_true', + help="Force complete testplan annotated results to be shown at the end." + ) + + parser.add_argument( + "--publish", + default=False, + action='store_true', + help="Publish results to the reports.opentitan.org web server.") + + parser.add_argument( + "-pi", + "--print-interval", + type=int, + default=10, + metavar="N", + help="""Interval in seconds. Print status every N seconds.""") + + parser.add_argument( + "-mp", + "--max-parallel", + type=int, + default=16, + metavar="N", + help="""Run only upto a fixed number of builds/tests at a time.""") + + parser.add_argument( + "--local", + default=False, + action='store_true', + help= + """Deploy builds and runs on the local workstation instead of the compute farm. + Support for this has not been added yet.""") + + args = parser.parse_args() + + if args.version: + print(version) + sys.exit() + + # Add log level 'VERBOSE' between INFO and DEBUG + log.addLevelName(utils.VERBOSE, 'VERBOSE') + + log_format = '%(levelname)s: [%(module)s] %(message)s' + log_level = log.INFO + if args.verbose == "default": + log_level = utils.VERBOSE + elif args.verbose == "debug": + log_level = log.DEBUG + log.basicConfig(format=log_format, level=log_level) + + if not os.path.exists(args.cfg): + log.fatal("Path to config file %s appears to be invalid.", args.cfg) + sys.exit(1) + + # If publishing results, then force full testplan mapping of results. 
+ if args.publish: + args.map_full_testplan = True + + args.scratch_root = resolve_scratch_root(args.scratch_root) + args.branch = resolve_branch(args.branch) + args.cfg = os.path.abspath(args.cfg) + + # Add timestamp to args that all downstream objects can use. + # Static variables - indicate timestamp. + ts_format_long = "%A %B %d %Y %I:%M:%S%p %Z" + ts_format = "%a.%m.%d.%y__%I.%M.%S%p" + curr_ts = datetime.datetime.now() + timestamp_long = curr_ts.strftime(ts_format_long) + timestamp = curr_ts.strftime(ts_format) + setattr(args, "ts_format_long", ts_format_long) + setattr(args, "ts_format", ts_format) + setattr(args, "timestamp_long", timestamp_long) + setattr(args, "timestamp", timestamp) + + # Register the seeds from command line with RunTest class. + Deploy.RunTest.seeds = args.seeds + # If we are fixing a seed value, no point in tests having multiple reseeds. + if args.fixed_seed: + args.reseed = 1 + Deploy.RunTest.fixed_seed = args.fixed_seed + + # Register the common deploy settings. + Deploy.Deploy.print_interval = args.print_interval + Deploy.Deploy.max_parallel = args.max_parallel + Deploy.Deploy.max_odirs = args.max_odirs + + # Build infrastructure from hjson file and create the list of items to + # be deployed. + + # Sets the project root directory: either specified from the command line + # or set by automatically assuming we are in a GitHub repository and + # automatically finding the root of this repository. + if args.proj_root: + proj_root = args.proj_root + else: + proj_root = get_proj_root() + + # TODO: SimCfg item below implies DV - need to solve this once we add FPV + # and other ASIC flow targets. + if args.tool == 'ascentlint': + cfg = LintCfg.LintCfg(args.cfg, proj_root, args) + else: + cfg = SimCfg.SimCfg(args.cfg, proj_root, args) + + # List items available for run if --list switch is passed, and exit. 
+ if args.list != []: + cfg.print_list() + sys.exit(0) + + # In simulation mode: if --cov-analyze switch is passed, then run the GUI + # tool. + if args.cov_analyze: + cfg.cov_analyze() + sys.exit(0) + + # Purge the scratch path if --purge option is set. + if args.purge: + cfg.purge() + + # Deploy the builds and runs + if args.items != []: + # Create deploy objects. + cfg.create_deploy_objects() + cfg.deploy_objects() + + # Generate results. + cfg.gen_results() + + # Publish results + if args.publish: + cfg.publish_results() + + else: + log.info("No items specified to be run.") + + # Exit with non-zero status if there were errors or failures. + if cfg.has_errors(): + log.error("Errors were encountered in this run.") + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/vendor/lowrisc_ip/dvsim/style.css b/vendor/lowrisc_ip/dvsim/style.css new file mode 100644 index 00000000..c2e03a93 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/style.css @@ -0,0 +1,134 @@ +/* Copyright lowRISC contributors. + * Licensed under the Apache License, Version 2.0, see LICENSE for details. + * SPDX-License-Identifier: Apache-2.0 + */ + +/* CSS for reports.opentitan.org. + * This is currently uploaded to reports.opentitan.org/css/style,css. It is + * referenced by all results published to the reports server for some basic + * styling. After making any change to this file, it needs to be manually + * copied over so that the new changes are reflected in the results pages. 
+ * gsutil cp gs://reports.opentitan.org/css/style.css + */ + +.results { + width: 80%; + max-width: 960px; + padding-left: 40px; + padding-right: 40px; + margin: 0 auto; + position: relative; + display: flex; + flex-direction: column; + min-height: 100vh; + font-family: "Trebuchet MS", Arial, Helvetica, sans-serif; +} + +.results p { + text-align: justify; +} + +.results pre { + overflow-x: auto; + white-space: pre-wrap; + white-space: -moz-pre-wrap; + white-space: -pre-wrap; + white-space: -o-pre-wrap; +} + +.results h1, .results h2, .results h3 { + text-align: center; +} + +.results table { + width: 90%; + margin: 2% auto; + border: 1px solid #f2f2f2; + border-collapse: collapse; + text-align: center; + vertical-align: middle; + display: table; + table-layout: auto +} + +.results th { + padding-top: 12px; + padding-bottom: 12px; + background-color: #3D1067; + text-transform: uppercase; + color: white; +} + +.results th, .results td { + border: 1px solid #f2f2f2; + padding: 8px; +} + +.results tr:hover { + background-color: #f2f2f2; +} + +.results tbody tr:nth-child(even) { + background: #f2f2f2; +} + +/* Color encoding for percentages. 
*/
+.cna {
+  color: #000000;
+  background-color: #f8f8f8;
+}
+
+.c0 {
+  color: #ffffff;
+  background-color: #EF5757;
+}
+
+.c1 {
+  color: #ffffff;
+  background-color: #EF6D57;
+}
+
+.c2 {
+  color: #000000;
+  background-color: #EF8357;
+}
+
+.c3 {
+  color: #000000;
+  background-color: #EF9957;
+}
+
+.c4 {
+  color: #000000;
+  background-color: #EFAF57;
+}
+
+.c5 {
+  color: #000000;
+  background-color: #EFC557;
+}
+
+.c6 {
+  color: #000000;
+  background-color: #EFDB57;
+}
+
+.c7 {
+  color: #000000;
+  background-color: #ECEF57;
+}
+
+.c8 {
+  color: #000000;
+  background-color: #D6EF57;
+}
+
+.c9 {
+  color: #000000;
+  background-color: #C0EF57;
+}
+
+.c10 {
+  color: #000000;
+  background-color: #57EF57;
+}
diff --git a/vendor/lowrisc_ip/dvsim/testplanner.py b/vendor/lowrisc_ip/dvsim/testplanner.py
new file mode 100755
index 00000000..b93eaf59
--- /dev/null
+++ b/vendor/lowrisc_ip/dvsim/testplanner.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python3
+# Copyright lowRISC contributors.
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +r"""Command-line tool to parse and process testplan Hjson + +""" +import argparse +import logging as log +import os +import sys +from pathlib import PurePath + +import hjson + +from testplanner import testplan_utils + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + 'testplan', + metavar='', + help='input testplan file (*.hjson)') + parser.add_argument( + '-r', + '--regr_results', + metavar='', + help='input regression results file (*.hjson)') + parser.add_argument( + '--outfile', + '-o', + type=argparse.FileType('w'), + default=sys.stdout, + help='output HTML file (without CSS)') + args = parser.parse_args() + outfile = args.outfile + + with outfile: + testplan_utils.gen_html(args.testplan, args.regr_results, outfile) + + +if __name__ == '__main__': + main() diff --git a/vendor/lowrisc_ip/dvsim/testplanner/README.md b/vendor/lowrisc_ip/dvsim/testplanner/README.md new file mode 100644 index 00000000..c6631662 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/README.md @@ -0,0 +1,237 @@ +--- +title: "Testplanner tool" +--- + +Testplanner is a Python based tool for parsing testplans written in Hjson +format into a data structure that can be used for: +* Expanding the testplan inline within the DV plan as a table +* Annotating the regression results with testplan entries for a document driven DV execution + +Please see [DV methodology]({{< relref "doc/ug/dv_methodology.md#documentation" >}}) +for more details on the rationale and motivation for writing and maintaining testplans +in a machine-parsable format (`Hjson`). +This document will focus on the anatomy of a Hjson testplan, list of features supported +and some of the ways of using the tool. 
+ +## Hjson Testplan + +### Testplan entries +Minimally, the following items are sufficient to adequately capture the +intent of a planned test: +* **name: name of the planned test** + + This is a single `lower_snake_case` string that succinctly describes the intended + feature being tested. As an example, a basic sanity test which is typically the + first test written on a brand new testbench would be simply named `sanity`. + +* **milestone: verification milestone** + + This is one of {"`V1`", "`V2`" and "`V3`"}. This allows us to concretely indicate + that all goals for a particular milestone have been achieved and we can + transition to the next. + +* **desc: description of the planned test** + + A multi-line string that briefly describes the intent of the test. It is + recommended to add a high level goal, stimulus and checking procedure so + that the reader gets the full picture of what and how the said feature is being + tested. + + Full [Markdown]({{< relref "doc/rm/markdown_usage_style" >}}) syntax is supported + when writing the description. + +* **tests: list of actual written tests that maps to this planned test** + + Testplan is written in the initial work stage of the verification + [life-cycle]({{< relref "doc/project/hw_stages#hardware-verification-stages" >}}). + When the DV engineer gets to actually developing the test, it may not map 1:1 to + the planned test - it may be possible that an already written test that mapped + to another planned test also satisfies the current one; OR it may also be + possible that the planned test needs to be split into multiple smaller tests. + To cater to these needs, we provide the ability to set a list of actual written + tests that maps to each planned test. This information will then be used to map + the regression results and annotate them to the testplan to generate the final + table. This list does not have to be populated right away. It can be updated + as and when tests are written. 
+ +If need arises, more entries can be added to this list relatively easily. + +Testplan entries are added using the `entries` key, which is a list that looks +like this: +```hjson + entries: [ + { + name: feature1 + milestone: V1 + desc: '''**Goal**: High level goal of this test + + **Stimulus**: Describe the stimulus procedure. + + **Check**: Describe the checking procedure.''' + tests: ["foo_feature1"] + } + { + name: feature2 + milestone: V2 + desc: '''**Goal**: High level goal of this test + + **Stimulus**: Describe the stimulus procedure. + + **Check**: Describe the checking procedure.''' + tests: ["foo_feature2_test1", + "foo_feature2_test2", + "foo_feature2_test3"] + } + ... + ] +``` + +### Import shared testplans +Typically, there are tests that are common to more that one testbench and can be +made a part of a 'shared' testplan that each DUT testplan can simply import. An +example of this is running the automated UVM RAL CSR tests, which applies to +almost all DUTs. This can be done using the `import_testplans` key: +```hjson + import_testplans: ["util/dvsim/testplanner/examples/common_testplan.hjson", + "hw/dv/tools/csr_testplan.hjson"] +``` + +Note that the paths to common testplans are relative to `$REPO_TOP`. + +For the sake of discussion below, we will refer to the 'main' or DUT testplan +as 'DUT' testplan and the shared testplans it imports as 'shared' or 'imported' +testplans. + +The imported testplans actually present a problem - how can we set +actual written tests that maps to the shared testplan entry generically +enough that they apply to more than one DUTs? We currently solve this by +providing wildcards, which are single `lower_snake_case` strings within +braces `'{..}'`. A substitution value (or list of values) for the wildcard +string can be optionally provided in the DUT testplan. Here's an example: + +```hjson +------- + // UART testplan: + name: uart + +------- + // Imported testplan: + { + name: csr + ... 
+ tests: ["{name}{intf}_csr_hw_reset"] + } +``` + +In the example above, `{name}` and `{intf}` are wildcards used in the +shared testplan for which substitution values are to be provided in the +DUT testplan. When the tool parses the DUT testplan along with the +imported testplans, it substitutes the wildcards with the substition +values found in the DUT testplan. If substitution is not available, then +the wildcard is replaced with an empty string. In the example above, +the list of written test resolves to `["uart_csr_hw_reset"]` after +substituting `{name}` with `uart` and `{intf}` with an empty string. +As many wildcards as needed can be added to the tests in the shared +testplans to support as wide usecases as possible across different +testbenches. Moreover, the substitution value can be a list of strings, +in which case, the list of written tests will resolve to all values +being substituted. See example below for illustration: + +```hjson +------- + // Chip testplan: + name: chip + intf: ["", "_jtag"] + foo: ["x", "y", "z"] + +------- + // Imported testplan: + { + name: csr + ... + tests: ["{name}{intf}_csr_hw_reset_{foo}"] + } +``` + +This will resolve to the following 6 tests: + +``` +["chip_csr_hw_reset_x", "chip_csr_hw_reset_y", "chip_csr_hw_reset_z", + "chip_jtag_csr_hw_reset_x", "chip_jtag_csr_hw_reset_y", "chip_jtag_csr_hw_reset_z"] +``` + +### Example sources + +The following examples provided within `util/dvsim/testplanner/examples` can be used as +a starting point. +* **`foo_testplan.hjson`**: DUT testplan +* **`common_testplan.hjson`**: shared testplan imported within the DUT testplan +* **`foo_dv_plan.md`**: DUT testplan imported within the DV plan doc in Markdown + +In addition, see the [UART DV Plan]({{< relref "hw/ip/uart/doc/dv_plan" >}}) for a +real 'production' example of inline expansion of an imported testplan as a table +within the DV Plan document. 
+The [UART testplan](https://github.com/lowRISC/opentitan/blob/master/hw/ip/uart/data/uart_testplan.hjson) +imports the shared testplans located at `hw/dv/tools/testplans` area. + +### Limitations + +The following limitations currently hold: +* Only the DUT testplan can import shared testplans; the imported + testplans cannot further import more testplans +* All planned test names parsed from the DUT testplan and all of + its imported tetsplans need to be unique + +## Usage examples + +### Standalone tool invocations + +Generate the testplan table in HTML to stdout: +```console +$ util/dvsim/testplanner.py testplanner/examples/foo_testplan.hjson +``` + +Generate the testplan table in HTML to a file: +```console +$ util/dvsim/testplanner.py testplanner/examples/foo_testplan.hjson -o /tmp/foo_testplan_table.html +``` + +Generate regression results table in HTML to stdout: +```console +$ util/dvsim/testplanner.py testplanner/examples/foo_testplan.hjson -r testplanner/examples/foo_regr_results.hjson +``` + +Generate regression results table in HTML to a file: +```console +$ util/dvsim/testplanner.py testplanner/examples/foo_testplan.hjson \ + -r testplanner/examples/foo_regr_results.hjson -o /tmp/foo_regr_results.html +``` + +### APIs for external tools +The `util/build_docs.py` script invokes the testplanner utility functions +directly to parse the Hjson testplan and insert a HTML table within the DV +plan document. 
This is done by invoking: + +```console +Example 1: +$ util/docgen.py -c ../hw/ip/uart/doc/uart_dv_plan.md -o /tmp/uart_dv_plan.html + +Example 2: +$ util/docgen.py -c testplanner/examples/foo_dv_plan.md -o /tmp/foo_dv_plan.html +``` +See following snippet of code for the APIs in use: +```python +from testplanner import class_defs, testplan_utils + + # hjson_testplan_path: a string pointing to the path to Hjson testplan + # outbuf: file buffer opened for writing + testplan = testplan_utils.parse_testplan(hjson_testplan_path) + testplan_utils.gen_html_testplan_table(testplan, outbuf) +``` + +## Future work +* Allow DUT and imported testplans have the same name for the planned test as + long as they are in separate files + * If the same name exists, then append the list of tests together +* Split the regression results table generation into a separate `dashboard_gen` + script which will also cater to generating results table for `lint` and `fpv` diff --git a/vendor/lowrisc_ip/dvsim/testplanner/__init__.py b/vendor/lowrisc_ip/dvsim/testplanner/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vendor/lowrisc_ip/dvsim/testplanner/class_defs.py b/vendor/lowrisc_ip/dvsim/testplanner/class_defs.py new file mode 100644 index 00000000..a7c9eed7 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/class_defs.py @@ -0,0 +1,340 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r"""TestplanEntry and Testplan classes for maintaining testplan entries +""" + +import re +import sys +from collections import OrderedDict + +import mistletoe +from tabulate import tabulate + + +class TestplanEntry(): + """An entry in the testplan + + A testplan entry has the following information: name of the planned test (testpoint), + a brief description indicating intent, stimulus and checking procedure, targeted milestone + and the list of actual developed tests. 
+ """ + name = "" + desc = "" + milestone = "" + tests = [] + + fields = ("name", "desc", "milestone", "tests") + milestones = ("N.A.", "V1", "V2", "V3") + + def __init__(self, name, desc, milestone, tests, substitutions=[]): + self.name = name + self.desc = desc + self.milestone = milestone + self.tests = tests + if not self.do_substitutions(substitutions): sys.exit(1) + + @staticmethod + def is_valid_entry(kv_pairs): + '''Pass a list of key=value pairs to check if testplan entries can be extracted + from it. + ''' + for field in TestplanEntry.fields: + if not field in kv_pairs.keys(): + print( + "Error: input key-value pairs does not contain all of the ", + "required fields to create an entry:\n", kv_pairs, + "\nRequired fields:\n", TestplanEntry.fields) + return False + if type(kv_pairs[field]) is str and kv_pairs[field] == "": + print("Error: field \'", field, "\' is an empty string\n:", + kv_pairs) + return False + if field == "milestone" and kv_pairs[ + field] not in TestplanEntry.milestones: + print("Error: milestone \'", kv_pairs[field], + "\' is invalid. Legal values:\n", + TestplanEntry.milestones) + return False + return True + + def do_substitutions(self, substitutions): + '''Substitute {wildcards} in tests + + If tests have {wildcards}, they are substituted with the 'correct' values using + key=value pairs provided by the substitutions arg. If wildcards are present but no + replacement is available, then the wildcards are replaced with an empty string. 
+ ''' + if substitutions == []: return True + for kv_pair in substitutions: + resolved_tests = [] + [(k, v)] = kv_pair.items() + for test in self.tests: + match = re.findall(r"{([A-Za-z0-9\_]+)}", test) + if len(match) > 0: + # match is a list of wildcards used in test + for item in match: + if item == k: + if type(v) is list: + if v == []: + resolved_test = test.replace( + "{" + item + "}", "") + resolved_tests.append(resolved_test) + else: + for subst_item in v: + resolved_test = test.replace( + "{" + item + "}", subst_item) + resolved_tests.append(resolved_test) + elif type(v) is str: + resolved_test = test.replace( + "{" + item + "}", v) + resolved_tests.append(resolved_test) + else: + print( + "Error: wildcard", item, "in test", test, + "has no viable", + "replacement value (need str or list):\n", + kv_pair) + return False + else: + resolved_tests.append(test) + if resolved_tests != []: self.tests = resolved_tests + + # if wildcards have no available replacements in substitutions arg, then + # replace with empty string + resolved_tests = [] + for test in self.tests: + match = re.findall(r"{([A-Za-z0-9\_]+)}", test) + if len(match) > 0: + for item in match: + resolved_tests.append(test.replace("{" + item + "}", "")) + if resolved_tests != []: self.tests = resolved_tests + return True + + def map_regr_results(self, regr_results, map_full_testplan=True): + '''map regression results to tests in this entry + + Given a list of regression results (a tuple containing {test name, # passing and + # total} find if the name of the test in the results list matches the written tests + in this testplan entry. If there is a match, then append the passing / total + information. If no match is found, or if self.tests is an empty list, indicate 0/1 + passing so that it is factored into the final total. 
+ ''' + test_results = [] + for test in self.tests: + found = False + for regr_result in regr_results: + if test == regr_result["name"]: + test_results.append(regr_result) + regr_result["mapped"] = True + found = True + break + + # if a test was not found in regr results, indicate 0/1 passing + if map_full_testplan and not found: + test_results.append({"name": test, "passing": 0, "total": 0}) + + # if no written tests were indicated in the testplan, reuse planned + # test name and indicate 0/1 passing + if map_full_testplan and self.tests == []: + test_results.append({"name": self.name, "passing": 0, "total": 0}) + + # replace tests with test results + self.tests = test_results + return regr_results + + def display(self): + print("testpoint: ", self.name) + print("description: ", self.desc) + print("milestone: ", self.milestone) + print("tests: ", self.tests) + + +class Testplan(): + """The full testplan + + This comprises of TestplanEntry entries + """ + + name = "" + entries = [] + + def __init__(self, name): + self.name = name + self.entries = [] + self.results_summary = OrderedDict() + self.results = "" + + if name == "": + print("Error: testplan name cannot be empty") + sys.exit(1) + + def entry_exists(self, entry): + '''check if new entry has the same name as one of the existing entries + ''' + for existing_entry in self.entries: + if entry.name == existing_entry.name: + print("Error: found a testplan entry with name = ", entry.name) + print("existing entry:\n", existing_entry) + print("new entry:\n", entry) + return True + return False + + def add_entry(self, entry): + '''add a new entry into the testplan + ''' + if self.entry_exists(entry): sys.exit(1) + self.entries.append(entry) + + def sort(self): + '''sort entries by milestone + ''' + self.entries = sorted(self.entries, key=lambda entry: entry.milestone) + + def map_regr_results(self, regr_results, map_full_testplan=True): + '''map regression results to testplan entries + ''' + def sum_results(totals, 
entry): + '''function to generate milestone and grand totals + ''' + ms = entry.milestone + for test in entry.tests: + # Create dummy tests entry for milestone total + if totals[ms].tests == []: + totals[ms].tests = [{ + "name": "**TOTAL**", + "passing": 0, + "total": 0 + }] + # Sum milestone total + totals[ms].tests[0]["passing"] += test["passing"] + totals[ms].tests[0]["total"] += test["total"] + # Sum grand total + if ms != "N.A.": + totals["N.A."].tests[0]["passing"] += test["passing"] + totals["N.A."].tests[0]["total"] += test["total"] + return totals + + totals = {} + # Create entry for total in each milestone; & the grand total. + for ms in TestplanEntry.milestones: + totals[ms] = TestplanEntry(name="N.A.", + desc="Total tests", + milestone=ms, + tests=[{ + "name": "**TOTAL**", + "passing": 0, + "total": 0 + }]) + if ms != "N.A.": totals[ms].tests = [] + + for entry in self.entries: + regr_results = entry.map_regr_results(regr_results, + map_full_testplan) + totals = sum_results(totals, entry) + + # extract unmapped tests from regr_results and create 'unmapped' entry + unmapped_regr_results = [] + for regr_result in regr_results: + if not "mapped" in regr_result.keys(): + unmapped_regr_results.append(regr_result) + + unmapped = TestplanEntry( + name="Unmapped tests", + desc="""A list of tests in the regression result that are not + mapped to testplan entries.""", + milestone="N.A.", + tests=unmapped_regr_results) + totals = sum_results(totals, unmapped) + + # add total back into 'entries' + for ms in TestplanEntry.milestones[1:]: + self.entries.append(totals[ms]) + self.sort() + self.entries.append(unmapped) + self.entries.append(totals["N.A."]) + + def display(self): + '''display the complete testplan for debug + ''' + print("name: ", self.name) + for entry in self.entries: + entry.display() + + def get_milestone_regressions(self): + regressions = {} + for entry in self.entries: + # Skip if milestone is "n.a." 
+ if entry.milestone not in entry.milestones[1:]: continue + # if ms key doesnt exist, create one + if entry.milestone not in regressions.keys(): + regressions[entry.milestone] = [] + # Append new tests to the list + for test in entry.tests: + if test not in regressions[entry.milestone] and test != "": + regressions[entry.milestone].append(test) + + # Build regressions dict into a hjson like data structure + output = [] + for ms in regressions.keys(): + ms_dict = {} + ms_dict["name"] = ms + ms_dict["tests"] = regressions[ms] + output.append(ms_dict) + return output + + def testplan_table(self, fmt="pipe"): + '''Generate testplan table from hjson entries in the format specified + by the 'fmt' arg. + ''' + table = [["Milestone", "Name", "Description", "Tests"]] + colalign = ("center", "center", "left", "left") + for entry in self.entries: + tests = "" + for test in entry.tests: + tests += test + "
\n" + desc = entry.desc.strip() + if fmt == "html": + desc = mistletoe.markdown(desc) + table.append([entry.milestone, entry.name, desc, tests]) + return tabulate(table, + headers="firstrow", + tablefmt=fmt, + colalign=colalign) + + def results_table(self, regr_results, map_full_testplan=True, fmt="pipe"): + '''Print the mapped regression results into a table in the format + specified by the 'fmt' arg. + ''' + self.map_regr_results(regr_results, map_full_testplan) + table = [[ + "Milestone", "Name", "Tests", "Passing", "Total", "Pass Rate" + ]] + colalign = ("center", "center", "left", "center", "center", "center") + for entry in self.entries: + milestone = entry.milestone + entry_name = entry.name + if milestone == "N.A.": milestone = "" + if entry_name == "N.A.": entry_name = "" + for test in entry.tests: + if test["total"] == 0: pass_rate = "-- %" + else: + pass_rate = test["passing"] / test["total"] * 100 + pass_rate = "{0:.2f} %".format(round(pass_rate, 2)) + table.append([ + milestone, entry_name, test["name"], test["passing"], + test["total"], pass_rate + ]) + milestone = "" + entry_name = "" + if entry.milestone == "N.A." and entry.name == "N.A.": + self.results_summary["Name"] = self.name.upper() + self.results_summary["Passing"] = test["passing"] + self.results_summary["Total"] = test["total"] + self.results_summary["Pass Rate"] = pass_rate + + self.results = tabulate(table, + headers="firstrow", + tablefmt="pipe", + colalign=colalign) + return self.results diff --git a/vendor/lowrisc_ip/dvsim/testplanner/examples/common_testplan.hjson b/vendor/lowrisc_ip/dvsim/testplanner/examples/common_testplan.hjson new file mode 100644 index 00000000..5a5f2883 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/examples/common_testplan.hjson @@ -0,0 +1,23 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + // only 'entries' supported in imported testplans for now + entries: [ + { + name: csr + desc: '''Standard CSR suite of tests run from all valid interfaces to prove SW + accessibility.''' + milestone: V1 + // {name} and {intf} are wildcards in tests + // importer needs to provide substitutions for these as string or a list + // if list, then substitution occurs on all values in the list + // if substitution is not provided, it will be replaced with an empty string + tests: ["{name}{intf}_csr_hw_reset", + "{name}{intf}_csr_rw", + "{name}{intf}_csr_bit_bash", + "{name}{intf}_csr_aliasing",] + } + ] +} + diff --git a/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_dv_plan.md b/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_dv_plan.md new file mode 100644 index 00000000..3ae4d281 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_dv_plan.md @@ -0,0 +1,7 @@ +--- +title: "FOO DV plan" +--- + +# Testplan + +{{< testplan "util/dvsim/testplanner/examples/foo_testplan.hjson" >}} diff --git a/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_regr_results.hjson b/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_regr_results.hjson new file mode 100644 index 00000000..be83aade --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_regr_results.hjson @@ -0,0 +1,107 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + timestamp: 10/10/2019 1:55AM + test_results: [ + { + name: foo_sanity + passing: 25 + total: 50 + } + { + name: foo_csr_hw_reset + passing: 20 + total: 20 + } + { + name: foo_jtag_csr_hw_reset + passing: 20 + total: 20 + } + { + name: foo_csr_rw + passing: 20 + total: 20 + } + { + name: foo_jtag_csr_rw + passing: 20 + total: 20 + } + { + name: foo_csr_bit_bash + passing: 20 + total: 20 + } + { + name: foo_csr_aliasing + passing: 20 + total: 20 + } + { + name: foo_jtag_csr_aliasing + passing: 20 + total: 20 + } + { + name: foo_feature1 + passing: 63 + total: 80 + } + { + name: foo_feature2_type1 + passing: 1 + total: 1 + } + { + name: foo_feature2_type2 + passing: 5 + total: 5 + } + { + name: foo_feature2_type3 + passing: 0 + total: 10 + } + { + name: foo_unmapped_test + passing: 0 + total: 10 + } + ] + cov_results: [ + { + name: line + result: 67 + } + { + name: toggle + result: 85 + } + { + name: branch + result: 78 + } + { + name: condition + result: 23 + } + { + name: fsm_seq + result: 96 + } + { + name: fsm_trans + result: 88 + } + { + name: assert + result: 40 + } + { + name: groups + result: 22 + } + ] +} diff --git a/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_testplan.hjson b/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_testplan.hjson new file mode 100644 index 00000000..10afb7e8 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/examples/foo_testplan.hjson @@ -0,0 +1,58 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + // 'name' is mandatory field + name: "foo" + intf: ["", "_jtag"] + + // 'import_testplans' is a list of imported common testplans + // paths are relative to repository top + // all key_value pairs in this file other than 'import_testplans' and 'entries' + // can be used for wildcard substitutions in imported testplans + import_testplans: ["util/dvsim/testplanner/examples/common_testplan.hjson"] + entries: [ + { + // name of the testplan entry - should be unique + name: sanity + desc: '''Basic FOO sanity test. Describe this test in sufficient detail. You can + split the description on multiple lines like this (with 3 single-inverted + commas. Note that the subsequent lines are indented right below where the + inverted commas start.''' + // milestone for which this test is targeted for - V1, V2 or V3 + milestone: V1 + // tests of actual written tests that maps to this entry + tests: ["foo_sanity"] + } + { + name: feature1 + desc: "A single line description with single double-inverted commas." + milestone: V2 + // testplan entry with no tests added + tests: [] + } + { + name: feature2 + desc: '''**Goal**: How-to description + + **Stimulus**: If possible, in the description indicate a brief one-liner + goal on the first line. Then, describe the stimulus and check procedures like + this. + + **Check**: This style is not mandatory, but highly recommended. Also note that + the description supports Markdown formatting. Add things: + - like bullets + - something in **bold** and in *italic* + - A sub-bullet item
+ Continue describing above bullet on a new line with a HTML line break. + + Start a new paragraph with with two newlines. + ''' + milestone: V2 + // testplan entry with multiple tests added + tests: ["foo_feature2_type1", + "foo_feature2_type2", + "foo_feature2_type3"] + } + ] +} diff --git a/vendor/lowrisc_ip/dvsim/testplanner/testplan_utils.py b/vendor/lowrisc_ip/dvsim/testplanner/testplan_utils.py new file mode 100644 index 00000000..c4f80423 --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/testplanner/testplan_utils.py @@ -0,0 +1,171 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r"""Command-line tool to parse and process testplan Hjson into a data structure + The data structure is used for expansion inline within DV plan documentation + as well as for annotating the regression results. +""" +import logging as log +import os +import sys +from pathlib import PurePath + +import hjson +import mistletoe +from tabulate import tabulate + +from .class_defs import * + + +def parse_testplan(filename): + '''Parse testplan Hjson file into a datastructure''' + self_path = os.path.dirname(os.path.realpath(__file__)) + repo_root = os.path.abspath(os.path.join(self_path, os.pardir, os.pardir, os.pardir)) + + name = "" + imported_testplans = [] + substitutions = [] + obj = parse_hjson(filename) + for key in obj.keys(): + if key == "import_testplans": + imported_testplans = obj[key] + elif key != "entries": + if key == "name": name = obj[key] + substitutions.append({key: obj[key]}) + for imported_testplan in imported_testplans: + obj = merge_dicts( + obj, parse_hjson(os.path.join(repo_root, imported_testplan))) + + testplan = Testplan(name=name) + for entry in obj["entries"]: + if not TestplanEntry.is_valid_entry(entry): sys.exit(1) + testplan_entry = TestplanEntry(name=entry["name"], + desc=entry["desc"], + milestone=entry["milestone"], + tests=entry["tests"], + 
substitutions=substitutions) + testplan.add_entry(testplan_entry) + testplan.sort() + return testplan + + +def gen_html_indent(lvl): + return " " * lvl + + +def gen_html_write_style(outbuf): + outbuf.write("\n") + + +def gen_html_testplan_table(testplan, outbuf): + '''generate HTML table from testplan with the following fields + milestone, planned test name, description + ''' + + text = testplan.testplan_table(fmt="html") + text = text.replace("", "
") + gen_html_write_style(outbuf) + outbuf.write(text) + return + + +def gen_html_regr_results_table(testplan, regr_results, outbuf): + '''map regr results to testplan and create a table with the following fields + milestone, planned test name, actual written tests, pass/total + ''' + text = "# Regression Results\n" + text += "## Run on{}\n".format(regr_results["timestamp"]) + text += "### Test Results\n\n" + text += testplan.results_table(regr_results["test_results"]) + if "cov_results" in regr_results.keys(): + text += "\n### Coverage Results\n\n" + cov_header = [] + cov_values = [] + for cov in regr_results["cov_results"]: + cov_header.append(cov["name"]) + cov_values.append(str(cov["result"])) + colalign = (("center", ) * len(cov_header)) + text += tabulate([cov_header, cov_values], + headers="firstrow", + tablefmt="pipe", + colalign=colalign) + text = mistletoe.markdown(text) + text = text.replace("
", "
") + gen_html_write_style(outbuf) + outbuf.write(text) + return + + +def parse_regr_results(filename): + obj = parse_hjson(filename) + # TODO need additional syntax checks + if not "test_results" in obj.keys(): + print("Error: key \'test_results\' not found") + sys, exit(1) + return obj + + +def parse_hjson(filename): + try: + f = open(str(filename), 'rU') + text = f.read() + odict = hjson.loads(text) + return odict + except IOError: + print('IO Error:', filename) + raise SystemExit(sys.exc_info()[1]) + except hjson.scanner.HjsonDecodeError as e: + print("Error: Unable to decode HJSON file %s: %s" % + (str(filename), str(e))) + sys.exit(1) + + +def merge_dicts(list1, list2, use_list1_for_defaults=True): + '''merge 2 dicts into one + + This funciton takes 2 dicts as args list1 and list2. It recursively merges list2 into + list1 and returns list1. The recursion happens when the the value of a key in both lists + is a dict. If the values of the same key in both lists (at the same tree level) are of + dissimilar type, then there is a conflict and an error is thrown. If they are of the same + scalar type, then the third arg "use_list1_for_defaults" is used to pick the final one. + ''' + for key in list2.keys(): + if key in list1: + if type(list1[key]) is list and type(list2[key]) is list: + list1[key].extend(list2[key]) + elif type(list1[key]) is dict and type(list2[key]) is dict: + list1[key] = merge_dicts(list1[key], list2[key]) + elif (type(list1[key]) == type(list2[key])): + if not use_list1_for_defaults: + list1[key] = list2[key] + else: + print("The type of value of key \"", key, "\" in list1: \"", \ + str(type(list1[key])), \ + "\" does not match the type of value in list2: \"", \ + str(type(list2[key])), \ + "\". 
The two lists cannot be merged.") + sys.exit(1) + else: + list1[key] = list2[key] + return list1 + + +def gen_html(testplan_file, regr_results_file, outbuf): + testplan = parse_testplan(os.path.abspath(testplan_file)) + if regr_results_file: + regr_results = parse_regr_results(os.path.abspath(regr_results_file)) + gen_html_regr_results_table(testplan, regr_results, outbuf) + else: + gen_html_testplan_table(testplan, outbuf) + outbuf.write('\n') diff --git a/vendor/lowrisc_ip/dvsim/utils.py b/vendor/lowrisc_ip/dvsim/utils.py new file mode 100644 index 00000000..e42c2a6c --- /dev/null +++ b/vendor/lowrisc_ip/dvsim/utils.py @@ -0,0 +1,299 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r""" +Utility functions common across dvsim. +""" + +import logging as log +import os +import pprint +import re +import shlex +import subprocess +import sys +import time +from collections import OrderedDict + +import hjson +import mistletoe + +# For verbose logging +VERBOSE = 15 + + +# Run a command and get the result. Exit with error if the command did not +# succeed. This is a simpler version of the run_cmd function below. +def run_cmd(cmd): + (status, output) = subprocess.getstatusoutput(cmd) + if status: + sys.stderr.write("cmd " + cmd + " returned with status " + str(status)) + sys.exit(status) + return output + + +# Run a command with a specified timeout. If the command does not finish before +# the timeout, then it returns -1. Else it returns the command output. If the +# command fails, it throws an exception and returns the stderr. 
+def run_cmd_with_timeout(cmd, timeout=-1, exit_on_failure=1): + args = shlex.split(cmd) + p = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + # If timeout is set, poll for the process to finish until timeout + result = "" + status = -1 + if timeout == -1: + p.wait() + else: + start = time.time() + while time.time() - start < timeout: + if p.poll() is not None: + break + time.sleep(.01) + + # Capture output and status if cmd exited, else kill it + if p.poll() is not None: + result = p.communicate()[0] + status = p.returncode + else: + log.error("cmd \"%s\" timed out!", cmd) + p.kill() + + if status != 0: + log.error("cmd \"%s\" exited with status %d", cmd, status) + if exit_on_failure == 1: sys.exit(status) + + return (result, status) + + +# Parse hjson and return a dict +def parse_hjson(hjson_file): + hjson_cfg_dict = None + try: + log.debug("Parsing %s", hjson_file) + f = open(hjson_file, 'rU') + text = f.read() + hjson_cfg_dict = hjson.loads(text, use_decimal=True) + f.close() + except Exception as e: + log.fatal( + "Failed to parse \"%s\" possibly due to bad path or syntax error.\n%s", + hjson_file, e) + sys.exit(1) + return hjson_cfg_dict + + +def subst_wildcards(var, mdict, ignored_wildcards=[], ignore_error=False): + ''' + If var has wildcards specified within {..}, find and substitute them. + ''' + def subst(wildcard, mdict): + if wildcard in mdict.keys(): return mdict[wildcard] + else: return None + + if "{eval_cmd}" in var: + idx = var.find("{eval_cmd}") + 11 + subst_var = subst_wildcards(var[idx:], mdict, ignored_wildcards, + ignore_error) + # If var has wildcards that were ignored, then skip running the command + # for now, assume that it will be handled later. 
+ match = re.findall(r"{([A-Za-z0-9\_]+)}", subst_var) + if len(match) == 0: + var = run_cmd(subst_var) + else: + match = re.findall(r"{([A-Za-z0-9\_]+)}", var) + if len(match) > 0: + subst_list = {} + for item in match: + if item not in ignored_wildcards: + log.debug("Found wildcard \"%s\" in \"%s\"", item, var) + found = subst(item, mdict) + if found is not None: + if type(found) is list: + subst_found = [] + for element in found: + element = subst_wildcards( + element, mdict, ignored_wildcards, + ignore_error) + subst_found.append(element) + # Expand list into a str since list within list is + # not supported. + found = " ".join(subst_found) + + elif type(found) is str: + found = subst_wildcards(found, mdict, + ignored_wildcards, + ignore_error) + + elif type(found) is bool: + found = int(found) + subst_list[item] = found + else: + # Check if the wildcard exists as an environment variable + env_var = os.environ.get(item) + if env_var is not None: subst_list[item] = env_var + elif not ignore_error: + log.error( + "Substitution for the wildcard \"%s\" not found", + item) + sys.exit(1) + for item in subst_list: + var = var.replace("{" + item + "}", str(subst_list[item])) + return var + + +def find_and_substitute_wildcards(sub_dict, + full_dict, + ignored_wildcards=[], + ignore_error=False): + ''' + Recursively find key values containing wildcards in sub_dict in full_dict + and return resolved sub_dict. 
+ ''' + for key in sub_dict.keys(): + if type(sub_dict[key]) in [dict, OrderedDict]: + # Recursively call this funciton in sub-dicts + sub_dict[key] = find_and_substitute_wildcards( + sub_dict[key], full_dict, ignored_wildcards, ignore_error) + + elif type(sub_dict[key]) is list: + sub_dict_key_values = list(sub_dict[key]) + # Loop through the list of key's values and substitute each var + # in case it contains a wildcard + for i in range(len(sub_dict_key_values)): + if type(sub_dict_key_values[i]) in [dict, OrderedDict]: + # Recursively call this funciton in sub-dicts + sub_dict_key_values[i] = \ + find_and_substitute_wildcards(sub_dict_key_values[i], + full_dict, ignored_wildcards, ignore_error) + + elif type(sub_dict_key_values[i]) is str: + sub_dict_key_values[i] = subst_wildcards( + sub_dict_key_values[i], full_dict, ignored_wildcards, + ignore_error) + + # Set the substituted key values back + sub_dict[key] = sub_dict_key_values + + elif type(sub_dict[key]) is str: + sub_dict[key] = subst_wildcards(sub_dict[key], full_dict, + ignored_wildcards, ignore_error) + return sub_dict + + +def md_results_to_html(title, css_path, md_text): + '''Convert results in md format to html. Add a little bit of styling. + ''' + html_text = "\n" + html_text += "\n" + html_text += "\n" + if title != "": + html_text += " {}\n".format(title) + if css_path != "": + html_text += " \n".format(css_path) + html_text += "\n" + html_text += "\n" + html_text += "
\n" + html_text += mistletoe.markdown(md_text) + html_text += "
\n" + html_text += "\n" + html_text += "\n" + html_text = htmc_color_pc_cells(html_text) + return html_text + + +def htmc_color_pc_cells(text): + '''This function finds cells in a html table that contains a "%" sign. It then + uses the number in front if the % sign to color the cell based on the value + from a shade from red to green. These color styles are encoded in ./style.css + which is assumed to be accessible by the final webpage. + + This function is now augmented to also take "E" or "W" as identifiers along + with "%". For example, '10 W' is indicative of 10 warnings, and will be color + coded with yellow. Likewise, "7 E" indicates 7 errors and will be color coded + with red. A value of 0 in both cases will be color coded with green. + + Note that a space between the value and the indicators (%, E, W) is mandatory. + ''' + + # Replace )", text) + if len(match) > 0: + subst_list = {} + fp_nums = [] + for item in match: + # item is a tuple - first is the full string indicating the table + # cell which we want to replace, second is the floating point value. + cell = item[0] + fp_num = item[1] + indicator = item[2] + # Skip if fp_num is already processed. + if (fp_num, indicator) in fp_nums: continue + fp_nums.append((fp_num, indicator)) + if fp_num in na_list: subst = color_cell(cell, "cna", indicator) + else: + # Item is a fp num. + try: + fp = float(fp_num) + except ValueError: + log.error("Percentage item \"%s\" in cell \"%s\" is not an " + \ + "integer or a floating point number", fp_num, cell) + continue + if indicator == "%": + # Item is a percentage. 
+ if fp >= 0.0 and fp < 10.0: subst = color_cell(cell, "c0") + elif fp >= 10.0 and fp < 20.0: + subst = color_cell(cell, "c1") + elif fp >= 20.0 and fp < 30.0: + subst = color_cell(cell, "c2") + elif fp >= 30.0 and fp < 40.0: + subst = color_cell(cell, "c3") + elif fp >= 40.0 and fp < 50.0: + subst = color_cell(cell, "c4") + elif fp >= 50.0 and fp < 60.0: + subst = color_cell(cell, "c5") + elif fp >= 60.0 and fp < 70.0: + subst = color_cell(cell, "c6") + elif fp >= 70.0 and fp < 80.0: + subst = color_cell(cell, "c7") + elif fp >= 80.0 and fp < 90.0: + subst = color_cell(cell, "c8") + elif fp >= 90.0 and fp < 100.0: + subst = color_cell(cell, "c9") + elif fp >= 100.0: + subst = color_cell(cell, "c10") + else: + # Item is a error or a warning num. + # Use "c6" (yellow) for warnings and "c0" (red) for errors. + if fp == 0: + subst = color_cell(cell, "c10", indicator) + elif indicator == "W": + subst = color_cell(cell, "c6", indicator) + elif indicator == "E": + subst = color_cell(cell, "c0", indicator) + subst_list[cell] = subst + for item in subst_list: + text = text.replace(item, subst_list[item]) + return text diff --git a/vendor/lowrisc_ip/uvmdvgen/Makefile.tpl b/vendor/lowrisc_ip/uvmdvgen/Makefile.tpl new file mode 100644 index 00000000..01c1380d --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/Makefile.tpl @@ -0,0 +1,43 @@ +${'####################################################################################################'} +${'## Copyright lowRISC contributors. ##'} +${'## Licensed under the Apache License, Version 2.0, see LICENSE for details. ##'} +${'## SPDX-License-Identifier: Apache-2.0 ##'} +${'####################################################################################################'} +${'## Entry point test Makefile for building and running tests. ##'} +${'## These are generic set of option groups that apply to all testbenches. 
##'} +${'## This flow requires the following options to be set: ##'} +${'## DV_DIR - current dv directory that contains the test Makefile ##'} +${'## DUT_TOP - top level dut module name ##'} +${'## TB_TOP - top level tb module name ##'} +${'## DOTF - .f file used for compilation ##'} +${'## COMPILE_KEY - compile option set ##'} +${'## TEST_NAME - name of the test to run - this is supplied on the command line ##'} +${'####################################################################################################'} +DV_DIR := ${'$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))'} +export DUT_TOP := ${name} +export TB_TOP := tb +FUSESOC_CORE := lowrisc:dv:${name}_sim:0.1 +COMPILE_KEY ?= default + +# Add coverage exclusion file below +COV_IP_EXCL ?= + +${'####################################################################################################'} +${'## A D D I N D I V I D U A L T E S T S B E L O W ##'} +${'####################################################################################################'} +TEST_NAME ?= ${name}_sanity +UVM_TEST ?= ${name}_base_test +UVM_TEST_SEQ ?= ${name}_base_vseq + +# common tests/seqs +include ${'$'}{DV_DIR}/../../../dv/tools/common_tests.mk + +ifeq (${'$'}{TEST_NAME},${name}_sanity) + UVM_TEST_SEQ = ${name}_sanity_vseq +endif + +${'####################################################################################################'} +${'## Include the tool Makefile below ##'} +${'## Dont add anything else below it! 
##'} +${'####################################################################################################'} +include ${'$'}{DV_DIR}/../../../dv/tools/Makefile diff --git a/vendor/lowrisc_ip/uvmdvgen/README.md b/vendor/lowrisc_ip/uvmdvgen/README.md new file mode 100644 index 00000000..5aeb6a58 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/README.md @@ -0,0 +1,346 @@ +--- +title: "Uvmdvgen: Initial testbench auto-generation tool" +--- + +`uvmdvgen` is a Python based tool to generate the boilerplate code for a UVM +agent as well as the complete UVM testbench for a given DUT. The tool generates +all the relevant UVM-based classes including the package and the fusesoc core +file to make it quickly plug-and-playable. The packages import the standard +utility and library packages wherever applicable, to conform to our existing +methodology and style. + +When starting with a new DV effort, the user typically goes through a copy-paste +exercise to replicate an existing UVM testbench code to the current one and has +to go through several debug cycles to get it working. This tool aims to +eliminate that. Also, as a part of our +[DV methodology]({{< relref "doc/ug/dv_methodology#code-reuse" >}}), +we provide utilities and base class structures that contain several +pieces of common code which can be reused when setting up a new DV environment. + +### Help switch (-h) +Running the tool with `-h` switch provides a brief description of all available +switches. 
+```console +$ util/uvmdvgen/uvmdvgen.py -h +usage: uvmdvgen.py [-h] [-a] [-s] [-e] [-c] [-hi] [-ha] + [-ea agt1 agt2 [agt1 agt2 ...]] [-ao [hw/dv/sv]] + [-eo [hw/ip//dv]] + [ip/block name] + +Command-line tool to autogenerate boilerplate DV testbench code extended from dv_lib / cip_lib + +positional arguments: + [ip/block name] Name of the ip/block for which the UVM TB is being + auto-generated + +optional arguments: + -h, --help show this help message and exit + -a, --gen_agent Generate UVM agent code extended from DV library + -s, --has_separate_host_device_driver + IP / block agent creates a separate driver for host + and device modes. (ignored if -a switch is not passed) + -e, --gen_env Generate testbench UVM env code + -c, --is_cip Is comportable IP - this will result in code being + extended from CIP library. If switch is not passed, + then the code will be extended from DV library + instead. (ignored if -e switch is not passed) + -hr, --has_ral Specify whether the DUT has CSRs and thus needs a UVM + RAL model + -hi, --has_interrupts + CIP has interrupts. Create interrupts interface in tb + -ha, --has_alerts CIP has alerts. Create alerts interface in tb + -ea agt1 agt2 [agt1 agt2 ...], --env_agents agt1 agt2 [agt1 agt2 ...] + Env creates an interface agent specified here. They + are assumed to already exist. Note that the list is + space-separated, and not comma-separated. (ignored if + -e switch is not passed) + -ao [hw/dv/sv], --agent_outdir [hw/dv/sv] + Path to place the agent code. A directory called + _agent is created at this location. (default set + to './') + -eo [hw/ip//dv], --env_outdir [hw/ip//dv] + Path to place the env code. 3 directories are created + - env, tb and tests. (default set to './') +``` + +### Generating UVM agent +The boilerplate code for a UVM agent for an interface can be generated using the +`-a` switch. 
This results in the generation of complete agent with classes that +extend from the [DV library]({{< relref "hw/dv/sv/dv_lib/README.md" >}}). Please see +that description for more details. + +The tool generates an interface, item, cfg, cov, monitor, driver and sequence +library classes. Let's take `jtag` as the argument passed for the name of the +IP. The following describes their contents in each source generated: +* `jtag_if` + + This is an empty shell of an interface. User is required to add content. + +* `jtag_item` + + This is an empty transaction packet extended from `uvm_sequence_item`. + +* `jtag_agent_cfg` + + This is the agent configuration object, it contains the virtual interface + handle for `jtag_if` and is called `vif`. + +* `jtag_agent_cov` + + This is a coverage component extended from `dv_base_agent_cov`. + +* `jtag_monitor` + + This is the monitor component extended from `dv_base_monitor`. It provides + the following items: + + * `virtual protected task collect_trans(uvm_phase phase)` + + This is a shell task within which user is required to add logic to detect + an event, sample the interface and create a transaction object and write + to the analysis port. This task is called in `dv_base_monitor::run_phase`. + +* `jtag_driver` + + This is the monitor component extended from `jtag_driver` which is typedef'ed + in the pkg to `dv_base_driver` with the right parameter set. It provides the + following items: + + * `virtual task reset_signals()` + + This task is for resetting the initial value of the `vif` signals. + + * `virtual task get_and_drive()` + + This task is used to get the next item from the sequencer, apply it to the + interface and return the response back. This is again, an empty task at the + moment. + + If the `-s` switch is passed, the tool creates `jtag_host_driver` and + `jtag_device_driver` instead, and their contents are exactly the same. + +* `seq_lib/jtag_base_seq` + + This is extended from `dv_base_seq`. 
+ +* `seq_lib/jtag_seq_list` + + This is a list of sequences included in one place. + +* `jtag_agent_pkg` + + This is the package file that includes all of the above sources and the + imports the dependent packages. + +* `jtag_agent.core` + + This is the fusesoc core file that is used to generate the filelist for + the build. + +The tool does not create `jtag_sequencer` or `jtag_agent` classes separately. +Instead, it `typedef`'s the `dv_base_sequencer` and `dv_base_agent` respectively +with the right type-parameters in the `pkg`. The reason for this is having a +dedicated sequencer and agent is not required since the `dv_base_agent` already +has all the sub-component instantiations and connections; and +`dv_base_sequencer` already has a handle to the agent cfg object and nothing +more is typically needed. + +### Generating UVM environment & testbench +The boilerplate code for a UVM environment and the testbench for a DUT can be +generated using the `-e` switch. This results in the generation of classes that +extend from [DV base library]({{< relref "hw/dv/sv/dv_lib/README.md" >}}). +If the `-c` switch is passed, it extends from +[CIP base library]({{< relref "hw/dv/sv/cip_lib/doc" >}}). With `-ea` switch, +user can provide a list of downstream agents to create within the environment. +Please see description for more details. + +The tool generates not only the UVM environment, but also the base test, +testbench, top level fusesoc core file with sim target, Makefile that already +includes the sanity and CSR test suite and more. With just a few tweaks, this +enables the user to reach the V1 milestone much quicker. Let's take `i2c_host` +as the argument passed for the name of the IP. The following is the list of +files generated with a brief description of their contents: + +Switches to indicate whether the CIP DUT contains interrupts or alerts are +provided by `-hi` and `-ha` respectively. By default, these are set to 'False' +(don't create interrupts or alerts). 
When set, it will create `intr_if` and +`alerts_if` in the testbench and set them into `uvm_config_db` for the +`cip_base_env` to pick up. +* `env/i2c_host_env_cfg` + + This is the env cfg object. It creates the downstream agent cfg objects that + were passed using the `-ea` switch in the `initialize()` function which is + called in the `dv_base_test::build_phase()`. Since the cfg handle is passed to + all env components, those downstream agent cfg objects can be hierarchically + referenced. + +* `env/i2c_host_env_cov` + + This is the coverage component class. A handle of this class is passed to the + scoreboard and the virtual sequencer so that covergroups can be sampled in the + scoreboard as well as sequences. + +* `env/i2c_host_reg_block` + + This is the UVM reg based RAL model. This is created for completeness. The + actual RAL model needs to be generated prior to running simulations using the + [regtool]({{< relref "util/reggen/README.md" >}}). + +* `env/i2c_host_scoreboard` + + This is the scoreboard component that already creates the analysis fifos and + queues for the agents passed via `-ea` switch. It adds starter tasks for + processing each fifo in a forever loop and invokes them in the `run_phase` + using `fork-join` statement. If the `-c` switch is passed, it also adds a + `process_tl_access` task that is extended from `cip_base_scoreboard`. This + task provides a tilelink access packet for further processing. + +* `env/i2c_host_virtual_sequencer` + + This is the virtual sequencer used by all test sequences to run the traffic. + It adds handles to downstream agent sequencers passed via `-ea` switch. + Sub-sequences can be started on them via the `p_sequencer` handle. + +* `env/seq_lib/i2c_host_base_vseq` + + This is the base virtual sequence that user can use to add common tasks, + functions and variables that other extended test sequences can reuse. 
For + starters, it provides the `i2c_host_init()` task and `do_i2c_host_init` knob + for controllability. + +* `env/seq_lib/i2c_host_sanity_vseq` + + This is the basic sanity test sequence that user needs to develop as the first + test sequence. It extends from `i2c_host_base_vseq`. + +* `env/seq_lib/i2c_host_csr_vseq` + + This is the test sequence for the entire CSR suite of tests. It calls + `dv_base_vseq::run_csr_vseq_wrapper()` task which is a complete test sequence. + All the user needs to do is run the CSR tests and add exclusions if needed + using the `add_csr_exclusions()` function provided. + +* `env/seq_lib/i2c_host_vseq_list` + + This is a list of test sequences included in one place. + +* `env/i2c_host_env` + + This is the env class that creates the downstream agents passed via `-ea` + switch. It sets their correspodnding cfg objects (which are members of env cfg + object) into the `uvm_config_db`. It also makes the analysis port connections + in the `connect_phase` and sets the sequencer handles in the virtual + sequencer. + +* `env/i2c_host_env_pkg` + + This is the env pkg file which includes all env classes and imports the + dependent packages. + +* `env/i2c_host_env.core` + + This is the fusesoc core file for the env pkg compile unit. + +* `tests/i2c_host_base_test` + + This is the base test class. The base test class it extends from already + creates the `env` and `cfg` objects, which are available for manipulation in + UVM phases. This class's name would be supplied to UVM_TESTNAME plusarg to run + tests using the UVM methodology. + +* `tests/i2c_host_test_pkg` + + This is the test pkg file which includes all test classes and imports the + dependent packages. + +* `tests/i2c_host_test.core` + + This is the fusesoc core file for the test pkg compile unit. + +* `tb/i2c_host_bind` + + This is the assertion bind file that is compiled along with the testbench in a + multi-top architecture. 
If the `-c` switch is passed, it adds the + `tlul_assert` module bind to the `i2c_host` DUT. + +* `tb/tb` + + This is the top level testbench module that instantiates the DUT along with + some of the interfaces that are required to be instantiated and connected and + passed on the the `uvm_config_db` since the base DV/CIP library classes + retrieve them. The user needs to look through the RTL and make additional + connections as needed. + +* `i2c_host_sim.core` + + This is the top level fusesoc core file with the sim target. It adds the RTL + and DV dependencies to construct the complete filelist to pass to simulator's + build step. + +* `Makefile` + + This is the simulation Makefile that is used as the starting point for + building and running tests using the [make flow]({{< relref "hw/dv/tools/README.md" >}}). + It already includes the sanity and CSR suite of tests to allow users to start + running tests right away. + +* `i2c_host_dv_plan.md` + + This is the initial DV plan document that will describe the entire testbench. This + is equivalent to the template available [here](https://github.com/lowRISC/opentitan/blob/master/hw/dv/doc/dv_plan_template.md). + +#### Examples +```console +$ util/uvmdvgen/uvmdvgen.py i2c -a +``` +This will create `./i2c/i2c_agent` and place all sources there. + +```console +$ util/uvmdvgen/uvmdvgen.py jtag -a -ao hw/dv/sv +``` +This will create `hw/dv/sv/jtag_agent` directory and place all the sources +there. + +```console +$ util/uvmdvgen/uvmdvgen.py i2c -a -s -ao hw/dv/sv +``` +This will create the I2C agent with separate 'host' mode and 'device' mode drivers. + +```console +$ util/uvmdvgen/uvmdvgen.py i2c -e -c -hi -eo hw/ip/i2c/dv +``` +This is an illegal command, it is not allowed to specify that an IP testbench +extends from CIP lib or has interrupts without specifying that it should support +a RAL model using the `-hr` flag. 
+ +```console +$ util/uvmdvgen/uvmdvgen.py i2c_host -e -c -hi -hr -ea i2c -eo hw/ip/i2c_host/dv +``` +This will create the complete `i2c_host` DV testbench extended from CIP lib and will +instantiate `i2c_agent`. It will also create and hook up the interrupt interface +in the testbench. + +```console +$ util/uvmdvgen/uvmdvgen.py foo -e -c -hi -ha -hr -ea foo -eo hw/ip/i2c_host/dv +``` +This will create the complete foo DV testbench extended from CIP lib and +will instantiate `foo_agent`. It will also create and hook up the interrupt interface +as well as alerts interface in the testbench. + +```console +$ util/uvmdvgen/uvmdvgen.py aes -e -c -hr -ea i2c -eo hw/ip/i2c_host/dv +``` +This will create the complete `i2c_host` DV testbench extended from CIP lib and will +instantiate `i2c_agent`. + +```console +$ util/uvmdvgen/uvmdvgen.py dma -e -eo hw/ip/dma/dv +``` +This will create the complete dma DV testbench extended from DV lib. It does not +instantiate any downstream agents due to absence of `-ea` switch. + +```console +$ util/uvmdvgen/uvmdvgen.py chip -e -ea uart i2c jtag -eo hw/top_earlgrey/dv +``` +This will create the complete chip testbench DV lib and will instantiate +`uart_agent`, `i2c_agent` and `jtag_agent` in the env. diff --git a/vendor/lowrisc_ip/uvmdvgen/README.md.tpl b/vendor/lowrisc_ip/uvmdvgen/README.md.tpl new file mode 100644 index 00000000..775ce5f7 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/README.md.tpl @@ -0,0 +1,3 @@ +# ${name.upper()} UVM Agent + +${name.upper()} UVM Agent is extended from DV library agent classes. diff --git a/vendor/lowrisc_ip/uvmdvgen/__init__.py b/vendor/lowrisc_ip/uvmdvgen/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/vendor/lowrisc_ip/uvmdvgen/agent.core.tpl b/vendor/lowrisc_ip/uvmdvgen/agent.core.tpl new file mode 100644 index 00000000..e71c9fa6 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/agent.core.tpl @@ -0,0 +1,33 @@ +CAPI=2: +# Copyright lowRISC contributors. 
+# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:${name}_agent:0.1" +description: "${name.upper()} DV UVM agent" +filesets: + files_dv: + depend: + - lowrisc:dv:dv_utils + - lowrisc:dv:dv_lib + files: + - ${name}_if.sv + - ${name}_agent_pkg.sv + - ${name}_item.sv: {is_include_file: true} + - ${name}_agent_cfg.sv: {is_include_file: true} + - ${name}_agent_cov.sv: {is_include_file: true} +% if has_separate_host_device_driver: + - ${name}_host_driver.sv: {is_include_file: true} + - ${name}_device_driver.sv: {is_include_file: true} +% else: + - ${name}_driver.sv: {is_include_file: true} +% endif + - ${name}_monitor.sv: {is_include_file: true} + - ${name}_agent.sv: {is_include_file: true} + - seq_lib/${name}_base_seq.sv: {is_include_file: true} + - seq_lib/${name}_seq_list.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/uvmdvgen/agent.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/agent.sv.tpl new file mode 100644 index 00000000..e4113586 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/agent.sv.tpl @@ -0,0 +1,29 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ${name}_agent extends dv_base_agent #( + .CFG_T (${name}_agent_cfg), + .DRIVER_T (${name}_driver), +% if has_separate_host_device_driver: + .HOST_DRIVER_T (${name}_host_driver), + .DEVICE_DRIVER_T(${name}_device_driver), +% endif + .SEQUENCER_T (${name}_sequencer), + .MONITOR_T (${name}_monitor), + .COV_T (${name}_agent_cov) +); + + `uvm_component_utils(${name}_agent) + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + // get ${name}_if handle + if (!uvm_config_db#(virtual ${name}_if)::get(this, "", "vif", cfg.vif)) begin + `uvm_fatal(`gfn, "failed to get ${name}_if handle from uvm_config_db") + end + endfunction + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/agent_cfg.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/agent_cfg.sv.tpl new file mode 100644 index 00000000..f693d577 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/agent_cfg.sv.tpl @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ${name}_agent_cfg extends dv_base_agent_cfg; + + // interface handle used by driver, monitor & the sequencer, via cfg handle + virtual ${name}_if vif; + + `uvm_object_utils_begin(${name}_agent_cfg) + `uvm_object_utils_end + + `uvm_object_new + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/agent_cov.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/agent_cov.sv.tpl new file mode 100644 index 00000000..91e48d42 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/agent_cov.sv.tpl @@ -0,0 +1,18 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ${name}_agent_cov extends dv_base_agent_cov #(${name}_agent_cfg); + `uvm_component_utils(${name}_agent_cov) + + // the base class provides the following handles for use: + // ${name}_agent_cfg: cfg + + // covergroups + + function new(string name, uvm_component parent); + super.new(name, parent); + // instantiate all covergroups here + endfunction : new + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/agent_pkg.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/agent_pkg.sv.tpl new file mode 100644 index 00000000..b8357488 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/agent_pkg.sv.tpl @@ -0,0 +1,49 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +package ${name}_agent_pkg; + // dep packages + import uvm_pkg::*; + import dv_utils_pkg::*; + import dv_lib_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // parameters + + // local types + // forward declare classes to allow typedefs below + typedef class ${name}_item; + typedef class ${name}_agent_cfg; + +% if has_separate_host_device_driver: + // add typedef for ${name}_driver which is dv_base_driver with the right parameter set + // ${name}_host_driver and ${name}_device_driver will extend from this + typedef dv_base_driver #(.ITEM_T(${name}_item), + .CFG_T (${name}_agent_cfg)) ${name}_driver; + +% endif + // reuse dv_base_seqeuencer as is with the right parameter set + typedef dv_base_sequencer #(.ITEM_T(${name}_item), + .CFG_T (${name}_agent_cfg)) ${name}_sequencer; + + // functions + + // package sources + `include "${name}_item.sv" + `include "${name}_agent_cfg.sv" + `include "${name}_agent_cov.sv" +% if has_separate_host_device_driver: + `include "${name}_host_driver.sv" + `include "${name}_device_driver.sv" +% else: + `include "${name}_driver.sv" +% endif + `include "${name}_monitor.sv" + `include "${name}_agent.sv" + `include 
"${name}_seq_list.sv" + +endpackage: ${name}_agent_pkg diff --git a/vendor/lowrisc_ip/uvmdvgen/base_seq.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/base_seq.sv.tpl new file mode 100644 index 00000000..bc4eea9b --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/base_seq.sv.tpl @@ -0,0 +1,18 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ${name}_base_seq extends dv_base_seq #( + .REQ (${name}_item), + .CFG_T (${name}_agent_cfg), + .SEQUENCER_T (${name}_sequencer) + ); + `uvm_object_utils(${name}_base_seq) + + `uvm_object_new + + virtual task body(); + `uvm_fatal(`gtn, "Need to override this when you extend from this class!") + endtask + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/base_test.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/base_test.sv.tpl new file mode 100644 index 00000000..e0b2a3b8 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/base_test.sv.tpl @@ -0,0 +1,32 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +% if is_cip: +class ${name}_base_test extends cip_base_test #( +% else: +class ${name}_base_test extends dv_base_test #( +% endif + .CFG_T(${name}_env_cfg), + .ENV_T(${name}_env) + ); + + `uvm_component_utils(${name}_base_test) + `uvm_component_new + + // the base class dv_base_test creates the following instances: + // ${name}_env_cfg: cfg + // ${name}_env: env + +% if not has_ral: + virtual function void build_phase(uvm_phase phase); + super.build_phase(phase); + cfg.has_ral = 1'b0; + endfunction +% endif + + // the base class also looks up UVM_TEST_SEQ plusarg to create and run that seq in + // the run_phase; as such, nothing more needs to be done + +endclass : ${name}_base_test + diff --git a/vendor/lowrisc_ip/uvmdvgen/base_vseq.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/base_vseq.sv.tpl new file mode 100644 index 00000000..ce11bb9c --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/base_vseq.sv.tpl @@ -0,0 +1,39 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +% if is_cip: +class ${name}_base_vseq extends cip_base_vseq #( +% else: +class ${name}_base_vseq extends dv_base_vseq #( +% endif +% if has_ral: + .RAL_T (${name}_reg_block), +% endif + .CFG_T (${name}_env_cfg), + .COV_T (${name}_env_cov), + .VIRTUAL_SEQUENCER_T (${name}_virtual_sequencer) + ); + `uvm_object_utils(${name}_base_vseq) + + // various knobs to enable certain routines + bit do_${name}_init = 1'b1; + + `uvm_object_new + + virtual task dut_init(string reset_kind = "HARD"); + super.dut_init(); + if (do_${name}_init) ${name}_init(); + endtask + + virtual task dut_shutdown(); + // check for pending ${name} operations and wait for them to complete + // TODO + endtask + + // setup basic ${name} features + virtual task ${name}_init(); + `uvm_error(`gfn, "FIXME") + endtask + +endclass : ${name}_base_vseq diff --git a/vendor/lowrisc_ip/uvmdvgen/bind.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/bind.sv.tpl new file mode 100644 index 00000000..75f9325d --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/bind.sv.tpl @@ -0,0 +1,18 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +module ${name}_bind; +% if is_cip: + + bind ${name} tlul_assert #( + .EndpointType("Device") + ) tlul_assert_device ( + .clk_i, + .rst_ni, + .h2d (tl_i), + .d2h (tl_o) + ); +% endif + +endmodule diff --git a/vendor/lowrisc_ip/uvmdvgen/common_vseq.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/common_vseq.sv.tpl new file mode 100644 index 00000000..8a32d180 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/common_vseq.sv.tpl @@ -0,0 +1,21 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ${name}_common_vseq extends ${name}_base_vseq; + `uvm_object_utils(${name}_common_vseq) + + constraint num_trans_c { + num_trans inside {[1:2]}; + } + `uvm_object_new + + virtual task body(); +% if is_cip: + run_common_vseq_wrapper(num_trans); +% elif has_ral: + run_csr_vseq_wrapper(num_trans); +% endif + endtask : body + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/cov_excl.el.tpl b/vendor/lowrisc_ip/uvmdvgen/cov_excl.el.tpl new file mode 100644 index 00000000..18be5a79 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/cov_excl.el.tpl @@ -0,0 +1,6 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// coverage exclusion file for ${name} +// TODO add coverage exclusion below diff --git a/vendor/lowrisc_ip/uvmdvgen/device_driver.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/device_driver.sv.tpl new file mode 100644 index 00000000..ae46511d --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/device_driver.sv.tpl @@ -0,0 +1,26 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ${name}_device_driver extends ${name}_driver; + `uvm_component_utils(${name}_device_driver) + + // the base class provides the following handles for use: + // ${name}_agent_cfg: cfg + + `uvm_component_new + + virtual task run_phase(uvm_phase phase); + // base class forks off reset_signals() and get_and_drive() tasks + super.run_phase(phase); + endtask + + // reset signals + virtual task reset_signals(); + endtask + + // drive trans received from sequencer + virtual task get_and_drive(); + endtask + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/driver.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/driver.sv.tpl new file mode 100644 index 00000000..074280cf --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/driver.sv.tpl @@ -0,0 +1,37 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ${name}_driver extends dv_base_driver #(${name}_item, ${name}_agent_cfg); + `uvm_component_utils(${name}_driver) + + // the base class provides the following handles for use: + // ${name}_agent_cfg: cfg + + `uvm_component_new + + virtual task run_phase(uvm_phase phase); + // base class forks off reset_signals() and get_and_drive() tasks + super.run_phase(phase); + endtask + + // reset signals + virtual task reset_signals(); + endtask + + // drive trans received from sequencer + virtual task get_and_drive(); + forever begin + seq_item_port.get_next_item(req); + $cast(rsp, req.clone()); + rsp.set_id_info(req); + `uvm_info(`gfn, $sformatf("rcvd item:\n%0s", req.sprint()), UVM_HIGH) + // TODO: do the driving part + // + // send rsp back to seq + `uvm_info(`gfn, "item sent", UVM_HIGH) + seq_item_port.item_done(rsp); + end + endtask + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/dv_plan.md.tpl b/vendor/lowrisc_ip/uvmdvgen/dv_plan.md.tpl new file mode 100644 index 00000000..43d8f6c3 --- /dev/null +++ 
b/vendor/lowrisc_ip/uvmdvgen/dv_plan.md.tpl @@ -0,0 +1,130 @@ +--- +title: "${name.upper()} DV Plan" +--- + + + +${'##'} Goals +* **DV** + * Verify all ${name.upper()} IP features by running dynamic simulations with a SV/UVM based testbench + * Develop and run all tests based on the [testplan](#testplan) below towards closing code and functional coverage on the IP and all of its sub-modules +* **FPV** + * Verify TileLink device protocol compliance with an SVA based testbench + +${'##'} Current status +* [Design & verification stage]({{< relref "doc/project/hw_dashboard" >}}) + * [HW development stages]({{< relref "doc/project/hw_stages" >}}) +* [Simulation results](https://reports.opentitan.org/hw/ip/${name}/dv/latest/results.html) + +${'##'} Design features +For detailed information on ${name.upper()} design features, please see the [${name.upper()} HWIP technical specification]({{< relref "hw/ip/${name}/doc" >}}). + +${'##'} Testbench architecture +${name.upper()} testbench has been constructed based on the [CIP testbench architecture]({{< relref "hw/dv/sv/cip_lib/doc" >}}). + +${'###'} Block diagram +![Block diagram](tb.svg) + +${'###'} Top level testbench +Top level testbench is located at `hw/ip/${name}/dv/tb/tb.sv`. It instantiates the ${name.upper()} DUT module `hw/ip/${name}/rtl/${name}.sv`. 
+In addition, it instantiates the following interfaces, connects them to the DUT and sets their handle into `uvm_config_db`: +* [Clock and reset interface]({{< relref "hw/dv/sv/common_ifs" >}}) +* [TileLink host interface]({{< relref "hw/dv/sv/tl_agent/README.md" >}}) +* ${name.upper()} IOs +* Interrupts ([`pins_if`]({{< relref "hw/dv/sv/common_ifs" >}}) +* Alerts ([`pins_if`]({{< relref "hw/dv/sv/common_ifs" >}}) +* Devmode ([`pins_if`]({{< relref "hw/dv/sv/common_ifs" >}}) + +${'###'} Common DV utility components +The following utilities provide generic helper tasks and functions to perform activities that are common across the project: +* [dv_utils_pkg]({{< relref "hw/dv/sv/dv_utils/README.md" >}}) +* [csr_utils_pkg]({{< relref "hw/dv/sv/csr_utils/README.md" >}}) + +${'###'} Compile-time configurations +[list compile time configurations, if any and what are they used for] + +${'###'} Global types & methods +All common types and methods defined at the package level can be found in +`${name}_env_pkg`. Some of them in use are: +```systemverilog +[list a few parameters, types & methods; no need to mention all] +``` +% if is_cip: + +${'###'} TL_agent +${name.upper()} testbench instantiates (already handled in CIP base env) [tl_agent]({{< relref "hw/dv/sv/tl_agent/README.md" >}}) +which provides the ability to drive and independently monitor random traffic via +TL host interface into ${name.upper()} device. +% endif + +% for agent in env_agents: +${'### '} ${agent.upper()} Agent +[Describe here or add link to its README] +% endfor + +${'###'} UVC/agent 1 +[Describe here or add link to its README] + +${'###'} UVC/agent 2 +[Describe here or add link to its README] + +% if has_ral: +${'###'} UVM RAL Model +The ${name.upper()} RAL model is created with the `hw/dv/tools/gen_ral_pkg.py` wrapper script at the start of the simulation automatically and is placed in the build area, along with a corresponding `fusesoc` core file. 
+The wrapper script invokes the [regtool.py]({{< relref "util/reggen/README.md" >}}) script from within to generate the RAL model. + +It can be created manually by running `make ral` command from the `dv` area. +% endif + +${'###'} Reference models +[Describe reference models in use if applicable, example: SHA256/HMAC] + +${'###'} Stimulus strategy +${'####'} Test sequences +All test sequences reside in `hw/ip/${name}/dv/env/seq_lib`. +The `${name}_base_vseq` virtual sequence is extended from `cip_base_vseq` and serves as a starting point. +All test sequences are extended from `${name}_base_vseq`. +It provides commonly used handles, variables, functions and tasks that the test sequences can simple use / call. +Some of the most commonly used tasks / functions are as follows: +* task 1: +* task 2: + +${'####'} Functional coverage +To ensure high quality constrained random stimulus, it is necessary to develop a functional coverage model. +The following covergroups have been developed to prove that the test intent has been adequately met: +* cg1: +* cg2: + +${'###'} Self-checking strategy +${'####'} Scoreboard +The `${name}_scoreboard` is primarily used for end to end checking. +It creates the following analysis ports to retrieve the data monitored by corresponding interface agents: +* analysis port1: +* analysis port2: + + +${'####'} Assertions +* TLUL assertions: The `tb/${name}_bind.sv` binds the `tlul_assert` [assertions]({{< relref "hw/ip/tlul/doc/TlulProtocolChecker.md" >}}) to the IP to ensure TileLink interface protocol compliance. +* Unknown checks on DUT outputs: The RTL has assertions to ensure all outputs are initialized to known values after coming out of reset. +* assert prop 1: +* assert prop 2: + +${'##'} Building and running tests +We are using our in-house developed [regression tool]({{< relref "hw/dv/tools/README.md" >}}) for building and running our tests and regressions. 
+Please take a look at the link for detailed information on the usage, capabilities, features and known issues. +Here's how to run a basic sanity test: +```console +$ cd hw/ip/${name}/dv +$ make TEST_NAME=${name}_sanity +``` + +${'##'} Testplan + +{{}} diff --git a/vendor/lowrisc_ip/uvmdvgen/env.core.tpl b/vendor/lowrisc_ip/uvmdvgen/env.core.tpl new file mode 100644 index 00000000..dd1ba33a --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/env.core.tpl @@ -0,0 +1,39 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:${name}_env:0.1" +description: "${name.upper()} DV UVM environment" +filesets: + files_dv: + depend: +% if is_cip: + - lowrisc:dv:cip_lib +% else: + - lowrisc:dv:dv_lib +% endif +% for agent in env_agents: + - lowrisc:dv:${agent}_agent +% endfor +% if has_ral: + - lowrisc:dv:gen_ral_pkg +% endif + files: + - ${name}_env_pkg.sv + - ${name}_env_cfg.sv: {is_include_file: true} + - ${name}_env_cov.sv: {is_include_file: true} +% if env_agents != []: + - ${name}_virtual_sequencer.sv: {is_include_file: true} +% endif + - ${name}_scoreboard.sv: {is_include_file: true} + - ${name}_env.sv: {is_include_file: true} + - seq_lib/${name}_vseq_list.sv: {is_include_file: true} + - seq_lib/${name}_base_vseq.sv: {is_include_file: true} + - seq_lib/${name}_common_vseq.sv: {is_include_file: true} + - seq_lib/${name}_sanity_vseq.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/uvmdvgen/env.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/env.sv.tpl new file mode 100644 index 00000000..230a6123 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/env.sv.tpl @@ -0,0 +1,52 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +% if is_cip: +class ${name}_env extends cip_base_env #( +% else: +class ${name}_env extends dv_base_env #( +% endif + .CFG_T (${name}_env_cfg), + .COV_T (${name}_env_cov), + .VIRTUAL_SEQUENCER_T(${name}_virtual_sequencer), + .SCOREBOARD_T (${name}_scoreboard) + ); + `uvm_component_utils(${name}_env) +% if env_agents != []: + +% for agent in env_agents: + ${agent}_agent m_${agent}_agent; +% endfor +% endif + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); +% for agent in env_agents: + // create components + m_${agent}_agent = ${agent}_agent::type_id::create("m_${agent}_agent", this); + uvm_config_db#(${agent}_agent_cfg)::set(this, "m_${agent}_agent*", "cfg", cfg.m_${agent}_agent_cfg); +% endfor + endfunction + + function void connect_phase(uvm_phase phase); + super.connect_phase(phase); +% if env_agents != []: + if (cfg.en_scb) begin +% endif +% for agent in env_agents: + m_${agent}_agent.monitor.analysis_port.connect(scoreboard.${agent}_fifo.analysis_export); +% endfor +% if env_agents != []: + end +% endif +% for agent in env_agents: + if (cfg.is_active && cfg.m_${agent}_agent_cfg.is_active) begin + virtual_sequencer.${agent}_sequencer_h = m_${agent}_agent.sequencer; + end +% endfor + endfunction + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/env_cfg.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/env_cfg.sv.tpl new file mode 100644 index 00000000..71cbb742 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/env_cfg.sv.tpl @@ -0,0 +1,52 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +% if is_cip: +class ${name}_env_cfg extends cip_base_env_cfg #(.RAL_T(${name}_reg_block)); +% elif has_ral: +class ${name}_env_cfg extends dv_base_env_cfg #(.RAL_T(${name}_reg_block)); +% else: +class ${name}_env_cfg extends dv_base_env_cfg; +% endif + + // ext component cfgs +% for agent in env_agents: + rand ${agent}_agent_cfg m_${agent}_agent_cfg; +% endfor + + `uvm_object_utils_begin(${name}_env_cfg) +% for agent in env_agents: + `uvm_field_object(m_${agent}_agent_cfg, UVM_DEFAULT) +% endfor + `uvm_object_utils_end + + `uvm_object_new + +% if has_ral: + virtual function void initialize_csr_addr_map_size(); + this.csr_addr_map_size = ${name.upper()}_ADDR_MAP_SIZE; + endfunction : initialize_csr_addr_map_size +% endif + + virtual function void initialize(bit [31:0] csr_base_addr = '1); +% if has_ral: + super.initialize(csr_base_addr); +% endif +% for agent in env_agents: + // create ${agent} agent config obj + m_${agent}_agent_cfg = ${agent}_agent_cfg::type_id::create("m_${agent}_agent_cfg"); +% endfor +% if is_cip: + + // set num_interrupts & num_alerts + begin + uvm_reg rg = ral.get_reg_by_name("intr_state"); + if (rg != null) begin + num_interrupts = ral.intr_state.get_n_used_bits(); + end + end +% endif + endfunction + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/env_cov.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/env_cov.sv.tpl new file mode 100644 index 00000000..18cd77c5 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/env_cov.sv.tpl @@ -0,0 +1,38 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +/** + * Covergoups that are dependent on run-time parameters that may be available + * only in build_phase can be defined here + * Covergroups may also be wrapped inside helper classes if needed. 
+ */ + +% if is_cip: +class ${name}_env_cov extends cip_base_env_cov #(.CFG_T(${name}_env_cfg)); +% else: +class ${name}_env_cov extends dv_base_env_cov #(.CFG_T(${name}_env_cfg)); +% endif + `uvm_component_utils(${name}_env_cov) + + // the base class provides the following handles for use: + // ${name}_env_cfg: cfg + + // covergroups + // [add covergroups here] + + function new(string name, uvm_component parent); + super.new(name, parent); + // [instantiate covergroups here] + endfunction : new + + virtual function void build_phase(uvm_phase phase); + super.build_phase(phase); + // [or instantiate covergroups here] +% if is_cip: + // Please instantiate sticky_intr_cov array of objects for all interrupts that are sticky + // See cip_base_env_cov for details +% endif + endfunction + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/env_pkg.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/env_pkg.sv.tpl new file mode 100644 index 00000000..abe79b4c --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/env_pkg.sv.tpl @@ -0,0 +1,48 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +package ${name}_env_pkg; + // dep packages + import uvm_pkg::*; + import top_pkg::*; + import dv_utils_pkg::*; +% for agent in env_agents: + import ${agent}_agent_pkg::*; +% endfor + import dv_lib_pkg::*; +% if is_cip: + import tl_agent_pkg::*; + import cip_base_pkg::*; +% endif +% if has_ral: + import csr_utils_pkg::*; + import ${name}_ral_pkg::*; +% endif + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // parameters +% if has_ral: + // TODO update below, or compile error occurs + parameter uint ${name.upper()}_ADDR_MAP_SIZE = ; +% endif + + // types +% if not has_ral: + typedef dv_base_reg_block ${name}_reg_block; +% endif + + // functions + + // package sources + `include "${name}_env_cfg.sv" + `include "${name}_env_cov.sv" + `include "${name}_virtual_sequencer.sv" + `include "${name}_scoreboard.sv" + `include "${name}_env.sv" + `include "${name}_vseq_list.sv" + +endpackage diff --git a/vendor/lowrisc_ip/uvmdvgen/gen_agent.py b/vendor/lowrisc_ip/uvmdvgen/gen_agent.py new file mode 100644 index 00000000..5f96d54f --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/gen_agent.py @@ -0,0 +1,62 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +"""Generate SystemVerilog UVM agent extended freom our DV lib +""" + +import os + +from mako import exceptions +from mako.template import Template +from pkg_resources import resource_filename + + +def gen_agent(name, has_separate_host_device_driver, root_dir): + # set sub name + agent_dir = root_dir + "/" + name + "_agent" + + # yapf: disable + # 4-tuple - path, ip name, class name, file ext + agent_srcs = [(agent_dir, name + '_', 'if', '.sv'), + (agent_dir, name + '_', 'item', '.sv'), + (agent_dir, name + '_', 'agent_cfg', '.sv'), + (agent_dir, name + '_', 'agent_cov', '.sv'), + (agent_dir, name + '_', 'monitor', '.sv'), + (agent_dir, name + '_', 'driver', '.sv'), + (agent_dir, name + '_', 'host_driver', '.sv'), + (agent_dir, name + '_', 'device_driver', '.sv'), + (agent_dir, name + '_', 'agent_pkg', '.sv'), + (agent_dir, name + '_', 'agent', '.sv'), + (agent_dir, name + '_', 'agent', '.core'), + (agent_dir, "", 'README', '.md'), + (agent_dir + "/seq_lib", name + '_', 'seq_list', '.sv'), + (agent_dir + "/seq_lib", name + '_', 'base_seq', '.sv')] + # yapf: enable + + for tup in agent_srcs: + path_dir = tup[0] + src_prefix = tup[1] + src = tup[2] + src_suffix = tup[3] + + if has_separate_host_device_driver: + if src == "driver": continue + else: + if src == "host_driver": continue + if src == "device_driver": continue + + ftpl = src + src_suffix + '.tpl' + fname = src_prefix + src + src_suffix + + # read template + tpl = Template(filename=resource_filename('uvmdvgen', ftpl)) + + if not os.path.exists(path_dir): os.system("mkdir -p " + path_dir) + with open(path_dir + "/" + fname, 'w') as fout: + try: + fout.write( + tpl.render(name=name, + has_separate_host_device_driver= + has_separate_host_device_driver)) + except: + log.error(exceptions.text_error_template().render()) diff --git a/vendor/lowrisc_ip/uvmdvgen/gen_env.py b/vendor/lowrisc_ip/uvmdvgen/gen_env.py new file mode 100644 index 00000000..35cd6cc9 --- /dev/null +++ 
b/vendor/lowrisc_ip/uvmdvgen/gen_env.py @@ -0,0 +1,70 @@ +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +"""Generate SystemVerilog UVM agent extended freom our DV lib +""" + +import os + +from mako.template import Template +from pkg_resources import resource_filename + + +def gen_env(name, is_cip, has_ral, has_interrupts, has_alerts, env_agents, + root_dir, add_makefile): + # yapf: disable + # 4-tuple - sub-path, ip name, class name, file ext + env_srcs = [('dv/env', name + '_', 'env_cfg', '.sv'), + ('dv/env', name + '_', 'env_cov', '.sv'), + ('dv/env', name + '_', 'env_pkg', '.sv'), + ('dv/env', name + '_', 'scoreboard', '.sv'), + ('dv/env', name + '_', 'virtual_sequencer', '.sv'), + ('dv/env', name + '_', 'env', '.sv'), + ('dv/env', name + '_', 'env', '.core'), + ('dv/env/seq_lib', name + '_', 'base_vseq', '.sv'), + ('dv/env/seq_lib', name + '_', 'sanity_vseq', '.sv'), + ('dv/env/seq_lib', name + '_', 'common_vseq', '.sv'), + ('dv/env/seq_lib', name + '_', 'vseq_list', '.sv'), + ('dv/tb', '', 'tb', '.sv'), + ('dv/tb', name + '_', 'bind', '.sv'), + ('dv/tests', name + '_', 'base_test', '.sv'), + ('dv/tests', name + '_', 'test_pkg', '.sv'), + ('dv/tests', name + '_', 'test', '.core'), + ('dv/cov', '', '', ''), + ('dv', '', 'Makefile', ''), + ('dv', name + '_', 'sim_cfg', '.hjson'), + ('doc', name + '_', 'dv_plan', '.md'), + ('data', name + '_', 'testplan', '.hjson'), + ('dv', name + '_', 'sim', '.core')] + # yapf: enable + + for tup in env_srcs: + path_dir = root_dir + '/' + tup[0] + src_prefix = tup[1] + src = tup[2] + src_suffix = tup[3] + + # Skip Makefile + if src == 'Makefile' and not add_makefile: continue + + ftpl = src + src_suffix + '.tpl' + fname = src_prefix + src + src_suffix + + if not os.path.exists(path_dir): os.system("mkdir -p " + path_dir) + if fname == "": continue + + # read template + tpl = Template(filename=resource_filename('uvmdvgen', 
ftpl)) + + # create rendered file + with open(path_dir + "/" + fname, 'w') as fout: + try: + fout.write( + tpl.render(name=name, + is_cip=is_cip, + has_ral=has_ral, + has_interrupts=has_interrupts, + has_alerts=has_alerts, + env_agents=env_agents)) + except: + log.error(exceptions.text_error_template().render()) diff --git a/vendor/lowrisc_ip/uvmdvgen/host_driver.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/host_driver.sv.tpl new file mode 100644 index 00000000..08103e0b --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/host_driver.sv.tpl @@ -0,0 +1,37 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ${name}_host_driver extends ${name}_driver; + `uvm_component_utils(${name}_host_driver) + + // the base class provides the following handles for use: + // ${name}_agent_cfg: cfg + + `uvm_component_new + + virtual task run_phase(uvm_phase phase); + // base class forks off reset_signals() and get_and_drive() tasks + super.run_phase(phase); + endtask + + // reset signals + virtual task reset_signals(); + endtask + + // drive trans received from sequencer + virtual task get_and_drive(); + forever begin + seq_item_port.get_next_item(req); + $cast(rsp, req.clone()); + rsp.set_id_info(req); + `uvm_info(`gfn, $sformatf("rcvd item:\n%0s", req.sprint()), UVM_HIGH) + // TODO: do the driving part + // + // send rsp back to seq + `uvm_info(`gfn, "item sent", UVM_HIGH) + seq_item_port.item_done(rsp); + end + endtask + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/if.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/if.sv.tpl new file mode 100644 index 00000000..5f15a32b --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/if.sv.tpl @@ -0,0 +1,11 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +interface ${name}_if (); + + // interface pins + + // debug signals + +endinterface diff --git a/vendor/lowrisc_ip/uvmdvgen/item.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/item.sv.tpl new file mode 100644 index 00000000..715c6d15 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/item.sv.tpl @@ -0,0 +1,14 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +class ${name}_item extends uvm_sequence_item; + + // random variables + + `uvm_object_utils_begin(${name}_item) + `uvm_object_utils_end + + `uvm_object_new + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/monitor.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/monitor.sv.tpl new file mode 100644 index 00000000..7366fc1c --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/monitor.sv.tpl @@ -0,0 +1,43 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +class ${name}_monitor extends dv_base_monitor #( + .ITEM_T (${name}_item), + .CFG_T (${name}_agent_cfg), + .COV_T (${name}_agent_cov) + ); + `uvm_component_utils(${name}_monitor) + + // the base class provides the following handles for use: + // ${name}_agent_cfg: cfg + // ${name}_agent_cov: cov + // uvm_analysis_port #(${name}_item): analysis_port + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); + endfunction + + task run_phase(uvm_phase phase); + super.run_phase(phase); + endtask + + // collect transactions forever - already forked in dv_base_moditor::run_phase + virtual protected task collect_trans(uvm_phase phase); + forever begin + // TODO: detect event + + // TODO: sample the interface + + // TODO: sample the covergroups + + // TODO: write trans to analysis_port + + // TODO: remove the line below: it is added to prevent zero delay loop in template code + #1us; + end + endtask + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/sanity_vseq.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/sanity_vseq.sv.tpl new file mode 100644 index 00000000..7fbc61b0 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/sanity_vseq.sv.tpl @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// basic sanity test vseq +class ${name}_sanity_vseq extends ${name}_base_vseq; + `uvm_object_utils(${name}_sanity_vseq) + + `uvm_object_new + + task body(); + `uvm_error(`gfn, "FIXME") + endtask : body + +endclass : ${name}_sanity_vseq diff --git a/vendor/lowrisc_ip/uvmdvgen/scoreboard.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/scoreboard.sv.tpl new file mode 100644 index 00000000..ffac8149 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/scoreboard.sv.tpl @@ -0,0 +1,116 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +% if is_cip: +class ${name}_scoreboard extends cip_base_scoreboard #( +% else: +class ${name}_scoreboard extends dv_base_scoreboard #( +% endif + .CFG_T(${name}_env_cfg), +% if has_ral: + .RAL_T(${name}_reg_block), +% endif + .COV_T(${name}_env_cov) + ); + `uvm_component_utils(${name}_scoreboard) + + // local variables + + // TLM agent fifos +% for agent in env_agents: + uvm_tlm_analysis_fifo #(${agent}_item) ${agent}_fifo; +% endfor + + // local queues to hold incoming packets pending comparison +% for agent in env_agents: + ${agent}_item ${agent}_q[$]; +% endfor + + `uvm_component_new + + function void build_phase(uvm_phase phase); + super.build_phase(phase); +% for agent in env_agents: + ${agent}_fifo = new("${agent}_fifo", this); +% endfor + endfunction + + function void connect_phase(uvm_phase phase); + super.connect_phase(phase); + endfunction + + task run_phase(uvm_phase phase); + super.run_phase(phase); + fork +% for agent in env_agents: + process_${agent}_fifo(); +% endfor + join_none + endtask +% for agent in env_agents: + + virtual task process_${agent}_fifo(); + ${agent}_item item; + forever begin + ${agent}_fifo.get(item); + `uvm_info(`gfn, $sformatf("received ${agent} item:\n%0s", item.sprint()), UVM_HIGH) + end + endtask +% endfor +% if is_cip: + + virtual task process_tl_access(tl_seq_item item, tl_channels_e channel = DataChannel); + uvm_reg csr; + bit do_read_check = 1'b1; + bit write = item.is_write(); + uvm_reg_addr_t csr_addr = get_normalized_addr(item.a_addr); + + // if access was to a valid csr, get the csr handle + if (csr_addr inside {cfg.csr_addrs}) begin + csr = ral.default_map.get_reg_by_offset(csr_addr); + `DV_CHECK_NE_FATAL(csr, null) + end + else begin + `uvm_fatal(`gfn, $sformatf("Access unexpected addr 0x%0h", csr_addr)) + end + + if (channel == AddrChannel) begin + // if incoming access is a write to a valid csr, then make updates right away + if (write) begin + 
void'(csr.predict(.value(item.a_data), .kind(UVM_PREDICT_WRITE), .be(item.a_mask))); + end + end + + // process the csr req + // for write, update local variable and fifo at address phase + // for read, update predication at address phase and compare at data phase + case (csr.get_name()) + // add individual case item for each csr + default: begin + `uvm_fatal(`gfn, $sformatf("invalid csr: %0s", csr.get_full_name())) + end + endcase + + // On reads, if do_read_check, is set, then check mirrored_value against item.d_data + if (!write && channel == DataChannel) begin + if (do_read_check) begin + `DV_CHECK_EQ(csr.get_mirrored_value(), item.d_data, + $sformatf("reg name: %0s", csr.get_full_name())) + end + void'(csr.predict(.value(item.d_data), .kind(UVM_PREDICT_READ))); + end + endtask +% endif + + virtual function void reset(string kind = "HARD"); + super.reset(kind); + // reset local fifos queues and variables + endfunction + + function void check_phase(uvm_phase phase); + super.check_phase(phase); + // post test checks - ensure that all local fifos and queues are empty + endfunction + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/seq_list.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/seq_list.sv.tpl new file mode 100644 index 00000000..9b515075 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/seq_list.sv.tpl @@ -0,0 +1,5 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +`include "${name}_base_seq.sv" diff --git a/vendor/lowrisc_ip/uvmdvgen/sim.core.tpl b/vendor/lowrisc_ip/uvmdvgen/sim.core.tpl new file mode 100644 index 00000000..740bf6a5 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/sim.core.tpl @@ -0,0 +1,28 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:${name}_sim:0.1" +description: "${name.upper()} DV sim target" +filesets: + files_rtl: + depend: + - lowrisc:ip:${name}:0.1 + files: + - tb/${name}_bind.sv + file_type: systemVerilogSource + + files_dv: + depend: + - lowrisc:dv:${name}_test + files: + - tb/tb.sv + file_type: systemVerilogSource + +targets: + sim: + toplevel: tb + filesets: + - files_rtl + - files_dv + default_tool: vcs diff --git a/vendor/lowrisc_ip/uvmdvgen/sim_cfg.hjson.tpl b/vendor/lowrisc_ip/uvmdvgen/sim_cfg.hjson.tpl new file mode 100644 index 00000000..408612a7 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/sim_cfg.hjson.tpl @@ -0,0 +1,80 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + // Name of the sim cfg - typically same as the name of the DUT. + name: ${name} + + // Top level dut name (sv module). + dut: ${name} + + // Top level testbench name (sv module). + tb: tb + + // Simulator used to sign off this block + tool: vcs + + // Fusesoc core file used for building the file list. + fusesoc_core: lowrisc:dv:${name}_sim:0.1 + + // Testplan hjson file. + testplan: "{proj_root}/hw/ip/${name}/data/${name}_testplan.hjson" + +% if has_ral: + // RAL spec - used to generate the RAL model. + ral_spec: "{proj_root}/hw/ip/${name}/data/${name}.hjson" +% endif + + // Import additional common sim cfg files. + // TODO: remove imported cfgs that do not apply. 
+% if is_cip: + import_cfgs: [// Project wide common sim cfg file + "{proj_root}/hw/dv/data/common_sim_cfg.hjson", + // Common CIP test lists +% if has_ral: + "{proj_root}/hw/dv/data/tests/csr_tests.hjson", +% endif + "{proj_root}/hw/dv/data/tests/mem_tests.hjson", +% if has_interrupts: + "{proj_root}/hw/dv/data/tests/intr_test.hjson", +% endif + "{proj_root}/hw/dv/data/tests/tl_access_tests.hjson", + "{proj_root}/hw/dv/data/tests/stress_tests.hjson"] +% else: + import_cfgs: [// Project wide common sim cfg file + "{proj_root}/hw/dv/data/common_sim_cfg.hjson", +% if has_ral: + "{proj_root}/hw/dv/data/tests/csr_tests.hjson", + "{proj_root}/hw/dv/data/tests/mem_tests.hjson"] +% endif +% endif + + // Add additional tops for simulation. + sim_tops: ["-top {name}_bind"] + + // Default iterations for all tests - each test entry can override this. + reseed: 50 + + // Default UVM test and seq class name. + uvm_test: ${name}_base_test + uvm_test_seq: ${name}_base_vseq + + // List of test specifications. + tests: [ + { + name: ${name}_sanity + uvm_test_seq: ${name}_sanity_vseq + } + + // TODO: add more tests here + ] + + // List of regressions. + regressions: [ + { + name: sanity + tests: ["${name}_sanity"] + } + ] +} + diff --git a/vendor/lowrisc_ip/uvmdvgen/tb.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/tb.sv.tpl new file mode 100644 index 00000000..94e0ccea --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/tb.sv.tpl @@ -0,0 +1,90 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +module tb; + // dep packages + import uvm_pkg::*; + import dv_utils_pkg::*; + import ${name}_env_pkg::*; + import ${name}_test_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + wire clk, rst_n; +% if is_cip: +% if has_interrupts: + wire [NUM_MAX_INTERRUPTS-1:0] interrupts; +% endif +% if has_alerts: + // TODO: change alert_names + list_of_alerts = {"alert_names"}; +% endif +% endif + + // interfaces + clk_rst_if clk_rst_if(.clk(clk), .rst_n(rst_n)); +% if is_cip: +% if has_interrupts: + pins_if #(NUM_MAX_INTERRUPTS) intr_if(interrupts); +% endif +% if has_alerts: + // TODO: declare alert interfaces according to the list_of_alerts + alert_if alert_names(.clk(clk), .rst_n(rst_n)) +% endif + pins_if #(1) devmode_if(); + tl_if tl_if(.clk(clk), .rst_n(rst_n)); +% endif +% for agent in env_agents: + ${agent}_if ${agent}_if(); +% endfor + + // dut + ${name} dut ( + .clk_i (clk ), +% if is_cip: + .rst_ni (rst_n ), + + .tl_i (tl_if.h2d ), +% if has_alerts: + .tl_o (tl_if.d2h ), + .alert_rx_i (alert_names.alert_rx ), + .alert_tx_o (alert_names.alert_tx ) +% else: + .tl_o (tl_if.d2h ) +% endif +% else: + .rst_ni (rst_n ) + +% endif + // TODO: add remaining IOs and hook them + ); + + initial begin + // drive clk and rst_n from clk_if + clk_rst_if.set_active(); + uvm_config_db#(virtual clk_rst_if)::set(null, "*.env", "clk_rst_vif", clk_rst_if); +% if is_cip: +% if has_interrupts: + uvm_config_db#(intr_vif)::set(null, "*.env", "intr_vif", intr_if); +% endif +% if has_alerts: + // TODO: set alert interfaces with the correct names + uvm_config_db#(virtual alert_if)::set(null, "*.env.m_alert_agent_alert_names", + "vif", alert_names); +% endif + uvm_config_db#(devmode_vif)::set(null, "*.env", "devmode_vif", devmode_if); + uvm_config_db#(tlul_assert_ctrl_vif)::set(null, "*.env", "tlul_assert_ctrl_vif", + dut.tlul_assert_device.tlul_assert_ctrl_if); + uvm_config_db#(virtual tl_if)::set(null, 
"*.env.m_tl_agent*", "vif", tl_if); +% endif +% for agent in env_agents: + uvm_config_db#(virtual ${agent}_if)::set(null, "*.env.m_${agent}_agent*", "vif", ${agent}_if); +% endfor + $timeformat(-12, 0, " ps", 12); + run_test(); + end + +endmodule diff --git a/vendor/lowrisc_ip/uvmdvgen/test.core.tpl b/vendor/lowrisc_ip/uvmdvgen/test.core.tpl new file mode 100644 index 00000000..905850bf --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/test.core.tpl @@ -0,0 +1,19 @@ +CAPI=2: +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +name: "lowrisc:dv:${name}_test:0.1" +description: "${name.upper()} DV UVM test" +filesets: + files_dv: + depend: + - lowrisc:dv:${name}_env + files: + - ${name}_test_pkg.sv + - ${name}_base_test.sv: {is_include_file: true} + file_type: systemVerilogSource + +targets: + default: + filesets: + - files_dv diff --git a/vendor/lowrisc_ip/uvmdvgen/test_pkg.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/test_pkg.sv.tpl new file mode 100644 index 00000000..3bd6ea56 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/test_pkg.sv.tpl @@ -0,0 +1,26 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +package ${name}_test_pkg; + // dep packages + import uvm_pkg::*; +% if is_cip: + import cip_base_pkg::*; +% else: + import dv_lib_pkg::*; +% endif + import ${name}_env_pkg::*; + + // macro includes + `include "uvm_macros.svh" + `include "dv_macros.svh" + + // local types + + // functions + + // package sources + `include "${name}_base_test.sv" + +endpackage diff --git a/vendor/lowrisc_ip/uvmdvgen/testplan.hjson.tpl b/vendor/lowrisc_ip/uvmdvgen/testplan.hjson.tpl new file mode 100644 index 00000000..bc32b683 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/testplan.hjson.tpl @@ -0,0 +1,34 @@ +// Copyright lowRISC contributors. 
+// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +{ + name: "${name}" + // TODO: remove the common testplans if not applicable + import_testplans: ["hw/dv/tools/testplans/csr_testplan.hjson", + "hw/dv/tools/testplans/mem_testplan.hjson", + "hw/dv/tools/testplans/intr_test_testplan.hjson", + "hw/dv/tools/testplans/tl_device_access_types_testplan.hjson"] + entries: [ + { + name: sanity + desc: '''**Goal**: Basic sanity test acessing a major datapath in ${name.upper()}. + + **Stimulus**: Describe the stimulus procedure. + + **Checks**": Describe the self-check procedure. + - add bullets as needed + - second bullet
+ describe second bullet + + Start a new paragraph.''' + milestone: V1 + tests: ["${name}_sanity"] + } + { + name: feature1 + desc: '''Add more test entries here like above.''' + milestone: V1 + tests: [] + } + ] +} diff --git a/vendor/lowrisc_ip/uvmdvgen/uvmdvgen.py b/vendor/lowrisc_ip/uvmdvgen/uvmdvgen.py new file mode 100755 index 00000000..5ef00775 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/uvmdvgen.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 +# Copyright lowRISC contributors. +# Licensed under the Apache License, Version 2.0, see LICENSE for details. +# SPDX-License-Identifier: Apache-2.0 +r"""Command-line tool to autogenerate boilerplate DV testbench code extended from dv_lib / cip_lib +""" +import argparse +import os +import sys + +import gen_agent +import gen_env + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + "name", + metavar="[ip/block name]", + help="Name of the ip/block for which the UVM TB is being auto-generated" + ) + + parser.add_argument( + "-a", + "--gen_agent", + action='store_true', + help="Generate UVM agent code extended from DV library") + + parser.add_argument( + "-s", + "--has_separate_host_device_driver", + action='store_true', + help= + """IP / block agent creates a separate driver for host and device modes. + (ignored if -a switch is not passed)""") + + parser.add_argument("-e", + "--gen_env", + action='store_true', + help="Generate testbench UVM env code") + + parser.add_argument( + "-c", + "--is_cip", + action='store_true', + help= + """Is comportable IP - this will result in code being extended from CIP + library. If switch is not passed, then the code will be extended from + DV library instead. (ignored if -e switch is not passed)""" + ) + + parser.add_argument( + "-hr", + "--has_ral", + default=False, + action='store_true', + help="""Specify whether the DUT has CSRs and thus needs a UVM RAL model. 
+ This option is required if either --is_cip or --has_interrupts are + enabled.""") + + parser.add_argument( + "-hi", + "--has_interrupts", + default=False, + action='store_true', + help="""CIP has interrupts. Create interrupts interface in tb""") + + parser.add_argument( + "-ha", + "--has_alerts", + default=False, + action='store_true', + help="""CIP has alerts. Create alerts interface in tb""") + + parser.add_argument( + "-ea", + "--env_agents", + nargs="+", + metavar="agt1 agt2", + help="""Env creates an interface agent specified here. They are + assumed to already exist. Note that the list is space-separated, + and not comma-separated. (ignored if -e switch is not passed)""" + ) + + parser.add_argument( + "-ao", + "--agent_outdir", + default="name", + metavar="[hw/dv/sv]", + help="""Path to place the agent code. A directory called _agent is + created at this location. (default set to './')""" + ) + + parser.add_argument( + "-eo", + "--env_outdir", + default="name", + metavar="[hw/ip/]", + help= + """Path to place the full tetsbench code. It creates 3 directories - dv, data and doc. + The DV plan and the testplan Hjson files are placed in the doc and data directories + respectively. These are to be merged into the IP's root directory (with the existing + data and doc directories). Under dv, it creates 3 sub-directories - env, + tb and tests to place all of the testbench sources. (default set to './')""" + ) + + parser.add_argument( + "-m", + "--add-makefile", + default=False, + action='store_true', + help= + """Tests are now run with dvsim.py tool that requires a hjson based sim cfg. 
+ Setting this option will also result in the Makefile to be auto-generated (which is + the older way of building and running sims going through deprecation).""" + ) + + args = parser.parse_args() + if args.agent_outdir == "name": args.agent_outdir = args.name + if args.env_outdir == "name": args.env_outdir = args.name + + """ The has_ral option must be set if either is_cip or has_interrupts is set, + as both require use of a RAL model. As such, it is disallowed to not have + has_ral set if one of these options is set.""" + if not args.has_ral: + args.has_ral = args.is_cip or args.has_interrupts + + if args.gen_agent: + gen_agent.gen_agent(args.name, \ + args.has_separate_host_device_driver, \ + args.agent_outdir) + + if args.gen_env: + if not args.env_agents: args.env_agents = [] + gen_env.gen_env(args.name, \ + args.is_cip, \ + args.has_ral, \ + args.has_interrupts, \ + args.has_alerts, \ + args.env_agents, \ + args.env_outdir, \ + args.add_makefile) + + +if __name__ == '__main__': + main() diff --git a/vendor/lowrisc_ip/uvmdvgen/virtual_sequencer.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/virtual_sequencer.sv.tpl new file mode 100644 index 00000000..e4cd3c42 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/virtual_sequencer.sv.tpl @@ -0,0 +1,21 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 + +% if is_cip: +class ${name}_virtual_sequencer extends cip_base_virtual_sequencer #( +% else: +class ${name}_virtual_sequencer extends dv_base_virtual_sequencer #( +% endif + .CFG_T(${name}_env_cfg), + .COV_T(${name}_env_cov) + ); + `uvm_component_utils(${name}_virtual_sequencer) + +% for agent in env_agents: + ${agent}_sequencer ${agent}_sequencer_h; +% endfor + + `uvm_component_new + +endclass diff --git a/vendor/lowrisc_ip/uvmdvgen/vseq_list.sv.tpl b/vendor/lowrisc_ip/uvmdvgen/vseq_list.sv.tpl new file mode 100644 index 00000000..6e306980 --- /dev/null +++ b/vendor/lowrisc_ip/uvmdvgen/vseq_list.sv.tpl @@ -0,0 +1,7 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +`include "${name}_base_vseq.sv" +`include "${name}_sanity_vseq.sv" +`include "${name}_common_vseq.sv" diff --git a/vendor/lowrisc_uvmdvgen.lock.hjson b/vendor/lowrisc_uvmdvgen.lock.hjson new file mode 100644 index 00000000..28552914 --- /dev/null +++ b/vendor/lowrisc_uvmdvgen.lock.hjson @@ -0,0 +1,15 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 + +// This file is generated by the util/vendor script. Please do not modify it +// manually. + +{ + upstream: + { + url: https://github.com/lowRISC/opentitan + rev: 0d7f7ac755d4e00811257027dd814edb2afca050 + only_subdir: util/uvmdvgen + } +} diff --git a/vendor/lowrisc_uvmdvgen.vendor.hjson b/vendor/lowrisc_uvmdvgen.vendor.hjson new file mode 100644 index 00000000..59eff14d --- /dev/null +++ b/vendor/lowrisc_uvmdvgen.vendor.hjson @@ -0,0 +1,13 @@ +// Copyright lowRISC contributors. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +{ + name: "uvmdvgen", + target_dir: "lowrisc_ip/uvmdvgen", + + upstream: { + url: "https://github.com/lowRISC/opentitan" + rev: "master" + only_subdir: "util/uvmdvgen" + } +}
with based on the fp + # value. "color-classes" are listed in ./style.css as follows: "cna" + # for NA value, "c0" to "c10" for fp value falling between 0.00-9.99, + # 10.00-19.99 ... 90.00-99.99, 100.0 respetively. + def color_cell(cell, cclass, indicator="%"): + op = cell.replace("\s*(" + patterns + ")\s+(" + indicators + ")\s*